Nov 24 17:03:23 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 24 17:03:23 crc restorecon[4702]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 17:03:23 crc restorecon[4702]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:23 crc restorecon[4702]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc 
restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 17:03:23 crc restorecon[4702]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:03:23 crc restorecon[4702]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 17:03:23 crc 
restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:23 crc restorecon[4702]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:23 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 
17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 17:03:24 crc 
restorecon[4702]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 
17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 17:03:24 crc restorecon[4702]: 
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:24 crc restorecon[4702]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 17:03:24 crc restorecon[4702]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 24 17:03:25 crc kubenswrapper[4760]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 17:03:25 crc kubenswrapper[4760]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 24 17:03:25 crc kubenswrapper[4760]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 17:03:25 crc kubenswrapper[4760]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 17:03:25 crc kubenswrapper[4760]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 24 17:03:25 crc kubenswrapper[4760]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.154307 4760 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.168932 4760 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.168986 4760 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.168997 4760 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169028 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169038 4760 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169046 4760 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169054 4760 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169063 4760 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169071 4760 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169081 4760 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169089 4760 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169097 4760 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169105 4760 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169113 4760 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169124 4760 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169133 4760 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169141 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169148 4760 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169156 4760 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169164 4760 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169172 4760 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169179 4760 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169187 4760 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169195 4760 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169203 4760 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169211 4760 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169237 4760 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169245 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169252 4760 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169260 4760 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169284 4760 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169292 4760 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169300 4760 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169308 4760 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169315 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169326 4760 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169336 4760 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169344 4760 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169353 4760 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169361 4760 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169369 4760 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169379 4760 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169391 4760 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169400 4760 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169409 4760 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169417 4760 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169425 4760 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169433 4760 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169441 4760 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169449 4760 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169457 4760 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169465 4760 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169472 4760 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169480 4760 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169487 4760 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169495 4760 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169503 4760 feature_gate.go:330] unrecognized feature gate: Example
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169511 4760 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169518 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169526 4760 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169534 4760 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169541 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169587 4760 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169596 4760 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169604 4760 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169612 4760 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169620 4760 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169630 4760 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169637 4760 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169645 4760 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.169653 4760 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172514 4760 flags.go:64] FLAG: --address="0.0.0.0"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172552 4760 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172576 4760 flags.go:64] FLAG: --anonymous-auth="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172591 4760 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172607 4760 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172619 4760 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172636 4760 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172651 4760 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172663 4760 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172674 4760 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172687 4760 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172703 4760 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172715 4760 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172727 4760 flags.go:64] FLAG: --cgroup-root=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172738 4760 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172750 4760 flags.go:64] FLAG: --client-ca-file=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172761 4760 flags.go:64] FLAG: --cloud-config=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172772 4760 flags.go:64] FLAG: --cloud-provider=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172784 4760 flags.go:64] FLAG: --cluster-dns="[]"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172798 4760 flags.go:64] FLAG: --cluster-domain=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172809 4760 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172821 4760 flags.go:64] FLAG: --config-dir=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172833 4760 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172846 4760 flags.go:64] FLAG: --container-log-max-files="5"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172861 4760 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172873 4760 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172885 4760 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172897 4760 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172913 4760 flags.go:64] FLAG: --contention-profiling="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172924 4760 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172936 4760 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172948 4760 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172959 4760 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172974 4760 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172986 4760 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.172997 4760 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173045 4760 flags.go:64] FLAG: --enable-load-reader="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173058 4760 flags.go:64] FLAG: --enable-server="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173069 4760 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173089 4760 flags.go:64] FLAG: --event-burst="100"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173101 4760 flags.go:64] FLAG: --event-qps="50"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173112 4760 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173123 4760 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173135 4760 flags.go:64] FLAG: --eviction-hard=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173151 4760 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173163 4760 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173173 4760 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173187 4760 flags.go:64] FLAG: --eviction-soft=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173199 4760 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173211 4760 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173223 4760 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173235 4760 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173249 4760 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173261 4760 flags.go:64] FLAG: --fail-swap-on="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173273 4760 flags.go:64] FLAG: --feature-gates=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173288 4760 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173300 4760 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173312 4760 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173324 4760 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173337 4760 flags.go:64] FLAG: --healthz-port="10248"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173349 4760 flags.go:64] FLAG: --help="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173361 4760 flags.go:64] FLAG: --hostname-override=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173372 4760 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173384 4760 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173396 4760 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173407 4760 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173418 4760 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173429 4760 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173440 4760 flags.go:64] FLAG: --image-service-endpoint=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173451 4760 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173463 4760 flags.go:64] FLAG: --kube-api-burst="100"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173475 4760 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173487 4760 flags.go:64] FLAG: --kube-api-qps="50"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173498 4760 flags.go:64] FLAG: --kube-reserved=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173510 4760 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173521 4760 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173533 4760 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173543 4760 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173588 4760 flags.go:64] FLAG: --lock-file=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173598 4760 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173608 4760 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173617 4760 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173646 4760 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173658 4760 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173668 4760 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173678 4760 flags.go:64] FLAG: --logging-format="text"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173687 4760 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173697 4760 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173706 4760 flags.go:64] FLAG: --manifest-url=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173715 4760 flags.go:64] FLAG: --manifest-url-header=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173728 4760 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173737 4760 flags.go:64] FLAG: --max-open-files="1000000"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173749 4760 flags.go:64] FLAG: --max-pods="110"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173759 4760 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173768 4760 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173777 4760 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173786 4760 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173796 4760 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173805 4760 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173815 4760 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173838 4760 flags.go:64] FLAG: --node-status-max-images="50"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173847 4760 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173856 4760 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173866 4760 flags.go:64] FLAG: --pod-cidr=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173874 4760 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173888 4760 flags.go:64] FLAG: --pod-manifest-path=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173897 4760 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173906 4760 flags.go:64] FLAG: --pods-per-core="0"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173915 4760 flags.go:64] FLAG: --port="10250"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173925 4760 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173934 4760 flags.go:64] FLAG: --provider-id=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173943 4760 flags.go:64] FLAG: --qos-reserved=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173952 4760 flags.go:64] FLAG: --read-only-port="10255"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173961 4760 flags.go:64] FLAG: --register-node="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173969 4760 flags.go:64] FLAG: --register-schedulable="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173979 4760 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.173994 4760 flags.go:64] FLAG: --registry-burst="10"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174035 4760 flags.go:64] FLAG: --registry-qps="5"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174045 4760 flags.go:64] FLAG: --reserved-cpus=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174056 4760 flags.go:64] FLAG: --reserved-memory=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174068 4760 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174078 4760 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174087 4760 flags.go:64] FLAG: --rotate-certificates="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174096 4760 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174105 4760 flags.go:64] FLAG: --runonce="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174115 4760 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174126 4760 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174138 4760 flags.go:64] FLAG: --seccomp-default="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174149 4760 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174161 4760 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174174 4760 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174187 4760 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174199 4760 flags.go:64] FLAG: --storage-driver-password="root"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174210 4760 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174222 4760 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174231 4760 flags.go:64] FLAG: --storage-driver-user="root"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174240 4760 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174250 4760 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174261 4760 flags.go:64] FLAG: --system-cgroups=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174272 4760 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174293 4760 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174305 4760 flags.go:64] FLAG: --tls-cert-file=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174317 4760 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174332 4760 flags.go:64] FLAG: --tls-min-version=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174343 4760 flags.go:64] FLAG: --tls-private-key-file=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174353 4760 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174362 4760 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174371 4760 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174382 4760 flags.go:64] FLAG: --v="2"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174393 4760 flags.go:64] FLAG: --version="false"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174405 4760 flags.go:64] FLAG: --vmodule=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174416 4760 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.174444 4760 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174681 4760 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174691 4760 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174701 4760 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174709 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174719 4760 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174727 4760 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174735 4760 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174743 4760 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174750 4760 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174759 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174766 4760 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174774 4760 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174782 4760 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174789 4760 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174798 4760 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174806 4760 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174814 4760 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174825 4760 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174835 4760 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174844 4760 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174853 4760 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174861 4760 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174871 4760 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174879 4760 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174887 4760 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174896 4760 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174906 4760 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174918 4760 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174927 4760 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174936 4760 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174947 4760 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174957 4760 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174966 4760 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174975 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174983 4760 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.174992 4760 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175034 4760 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175045 4760 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175055 4760 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175064 4760 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175072 4760 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175081 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175089 4760 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175097 4760 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175105 4760 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175113 4760 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175121 4760 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175129 4760 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175137 4760 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175144 4760 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175153 4760 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175162 4760 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175170 4760 feature_gate.go:330] unrecognized feature gate: Example
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175178 4760 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175186 4760 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175194 4760 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175203 4760 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175211 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175219 4760 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175227 4760 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175235 4760 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175244 4760 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175253 4760 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175261 4760 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175269 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175277 4760 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175286 4760 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175294 4760 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175303 4760 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175312 4760 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.175320 4760 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.175334 4760 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.195810 4760 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.195886 4760 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196077 4760 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196098 4760 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196109 4760 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196119 4760 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196129 4760 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196138 4760 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196146 4760 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196155 4760 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196165 4760 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196174 4760 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196182 4760 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196190 4760 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196198 4760 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196207 4760 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196216 4760 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196224 4760 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196233 4760 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196242 4760 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196251 4760 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196259 4760 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196271 4760 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196282 4760 feature_gate.go:330] unrecognized feature gate: Example
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196291 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196300 4760 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196309 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196320 4760 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196328 4760 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196337 4760 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196346 4760 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196356 4760 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196365 4760 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196374 4760 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196382 4760 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196391 4760 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196402 4760 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196410 4760 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196419 4760 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196428 4760 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196436 4760 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196446 4760 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196454 4760 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196462 4760 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196471 4760 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196479 4760 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196491 4760 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196500 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196509 4760 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196518 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196526 4760 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196534 4760 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196543 4760 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196551 4760 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196560 4760 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196572 4760 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196583 4760 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196592 4760 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196600 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196609 4760 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196618 4760 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196626 4760 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196635 4760 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196643 4760 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196652 4760 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196660 4760 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196668 4760 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196677 4760 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196686 4760 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196694 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196702 4760 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196711 4760 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.196721 4760 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.196737 4760 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197028 4760 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197041 4760 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197051 4760 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197062 4760 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197070 4760 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197079 4760 feature_gate.go:330] unrecognized feature gate: Example
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197089 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197099 4760 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197108 4760 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197117 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197127 4760 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197136 4760 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197145 4760 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197154 4760 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197163 4760 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197171 4760 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197180 4760 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197189 4760 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197198 4760 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197207 4760 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197215 4760 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197224 4760 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197232 4760 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197240 4760 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197249 4760 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197260 4760 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197270 4760 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197279 4760 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197288 4760 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197297 4760 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197309 4760 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197319 4760 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197329 4760 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197338 4760 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197349 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197358 4760 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197366 4760 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197375 4760 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197384 4760 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197392 4760 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197404 4760 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197414 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197423 4760 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197432 4760 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197440 4760 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197450 4760 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197460 4760 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197471 4760 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197480 4760 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197493 4760 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197504 4760 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197514 4760 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197523 4760 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197534 4760 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197567 4760 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197579 4760 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197588 4760 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197597 4760 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197605 4760 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197614 4760 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197629 4760 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197638 4760 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197646 4760 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197655 4760 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197664 4760 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197672 4760 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197681 4760 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197695 4760 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197704 4760 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197712 4760 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.197722 4760 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.197737 4760 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.198096 4760 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.204880 4760 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.205063 4760 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.207811 4760 server.go:997] "Starting client certificate rotation"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.207863 4760 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.208167 4760 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-22 13:26:49.098441139 +0000 UTC
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.208353 4760 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.235325 4760 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.238809 4760 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.239650 4760 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.260188 4760 log.go:25] "Validated CRI v1 runtime API"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.303559 4760 log.go:25] "Validated CRI v1 image API"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.307917 4760 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.316828 4760 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-24-16-58-22-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.316878 4760 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.342613 4760 manager.go:217] Machine: {Timestamp:2025-11-24 17:03:25.340134607 +0000 UTC m=+0.663016227 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:17737b2a-2300-48a8-a1cc-45163d19bbaa BootID:c065c256-59f6-47bf-8461-0f224e5ef7ad Filesystems:[{Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:6b:49:64 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:6b:49:64 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:83:31:2b Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:e5:15:6a Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:e5:84:f2 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:8a:54:81 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:ea:90:06:71:72:7b Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:fe:ef:20:1a:77:cb Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.342994 4760 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.343330 4760 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.344961 4760 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.345293 4760 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.345353 4760 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.346614 4760 topology_manager.go:138] "Creating topology manager with none policy"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.346651 4760 container_manager_linux.go:303] "Creating device plugin manager"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.347414 4760 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.347449 4760 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.347825 4760 state_mem.go:36] "Initialized new in-memory state store"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.348052 4760 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.351941 4760 kubelet.go:418] "Attempting to sync node with API server"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.351981 4760 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.352040 4760 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.352063 4760 kubelet.go:324] "Adding apiserver pod source"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.352110 4760 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.356633 4760 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.358795 4760 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.360534 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused
Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.360661 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError"
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.360722 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused
Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.360912 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.361945 4760 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365099 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365150 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365167 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365184 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365208 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365223 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365238 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365262 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365282 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365297 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365346 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365361 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.365407 4760 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.366417 4760 server.go:1280] "Started kubelet"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.366672 4760 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.366989 4760 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.367951 4760 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.368801 4760 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused
Nov 24 17:03:25 crc systemd[1]: Started Kubernetes Kubelet.
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.381399 4760 server.go:460] "Adding debug handlers to kubelet server"
Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.388158 4760 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.230:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b001d694ca326 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-24 17:03:25.366362918 +0000 UTC m=+0.689244498,LastTimestamp:2025-11-24 17:03:25.366362918 +0000 UTC m=+0.689244498,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.390884 4760 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.390982 4760 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.391243 4760 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 12:22:20.123292158 +0000 UTC
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.391430 4760 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 211h18m54.731869886s for next certificate rotation
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.391534 4760 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.391580 4760 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.391537 4760 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.391705 4760 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.392277 4760 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.230:6443: connect: connection refused" interval="200ms"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.392572 4760 factory.go:55] Registering systemd factory
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.392624 4760 factory.go:221] Registration of the systemd container factory successfully
Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.392620 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused
Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.392713 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError"
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.394492 4760 factory.go:153] Registering CRI-O factory
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.394532 4760 factory.go:221] Registration of the crio container factory successfully
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.394653 4760 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.394697 4760 factory.go:103] Registering Raw factory
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.394724 4760 manager.go:1196] Started watching for new ooms in manager
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.396472 4760 manager.go:319] Starting recovery of all containers
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403372 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403477 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403502 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403523 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403543 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403563 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403584 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403604 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403627 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403646 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403665 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403688 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403709 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403734 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403757 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403778 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403833 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403852 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403873 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403891 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403909 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403929 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.403947 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404131 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404156 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404175 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404200 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404222 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404280 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404303 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404323 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404345 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404366 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404388 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404414 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404434 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404453 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404475 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404493 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404516 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404538 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404559 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404579 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404598 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404618 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404636 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404655 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404676 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404697 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404716 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404737 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404759 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404789 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404874 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404898 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404920 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404940 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404958 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.404977 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405074 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405093 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405111 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405135 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405156 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405175 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405196 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405215 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405233 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405254 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405273 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405292 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405310 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405333 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405352 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405371 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405391 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405408 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405427 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405444 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405467 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405485 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405504 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405554 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405576 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405594 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405616 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405637 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405658 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405677 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405695 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405713 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405732 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405751 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405772 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405794 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405815 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405835 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405856 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405876 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405894 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405917 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405936 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405956 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.405976 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406119 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406142 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406162 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406183 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406203 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406226 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406247 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406271 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406288 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406309 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406328 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406348 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406369 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406389 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406410 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406428 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406447 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406469 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406487 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406506 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406524 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406544 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406608 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406633 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406651 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406669 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406687 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406705 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406722 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406741 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406758 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406781 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406799 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406818 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406836 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406854 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406871 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783"
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406890 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406909 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406929 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406949 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406967 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.406986 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407029 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407051 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407071 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407092 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407112 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" 
seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407130 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407149 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407168 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407186 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407204 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407224 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407247 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407269 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407289 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407307 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407324 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 24 17:03:25 
crc kubenswrapper[4760]: I1124 17:03:25.407343 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407361 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407380 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.407399 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410561 4760 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410606 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410629 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410648 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410667 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410689 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410708 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" 
volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410728 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410749 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410768 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410786 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410805 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410826 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410846 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410868 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410887 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410905 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410923 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" 
volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410941 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410961 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410980 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.410999 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411050 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411071 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411095 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411114 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411135 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411153 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411172 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411190 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411209 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411230 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411704 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411729 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411748 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411765 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411783 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411801 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411820 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411839 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411864 4760 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411883 4760 reconstruct.go:97] "Volume reconstruction finished" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.411895 4760 reconciler.go:26] "Reconciler: start to sync state" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.429346 4760 manager.go:324] Recovery completed Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.445995 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.447780 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.447994 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.448167 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.449449 4760 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.449663 4760 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.449823 4760 state_mem.go:36] "Initialized new in-memory state store" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.461947 4760 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.465018 4760 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.465074 4760 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.465115 4760 kubelet.go:2335] "Starting kubelet main sync loop" Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.465176 4760 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.466136 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.466240 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.478442 4760 policy_none.go:49] "None policy: Start" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.479579 4760 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.479629 4760 state_mem.go:35] "Initializing new in-memory state store" Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.492103 4760 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.536724 4760 manager.go:334] "Starting Device Plugin manager" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.536815 4760 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.536844 4760 server.go:79] "Starting device plugin registration server" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.537846 4760 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.537885 4760 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.538635 4760 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.538837 4760 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.538864 4760 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.550323 4760 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.565617 4760 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 24 17:03:25 crc kubenswrapper[4760]: 
I1124 17:03:25.565736 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.567461 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.567513 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.567541 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.567797 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.568346 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.568470 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.569209 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.569282 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.569304 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.569725 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.569940 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.570115 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.570190 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.570264 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.570289 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.571660 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.571718 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.571670 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.571742 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.571757 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.571775 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.572065 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.572449 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.572509 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.573391 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.573449 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.573464 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.573711 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.573884 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.573941 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.574804 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.574850 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.574918 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.575538 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.575579 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.575601 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.575556 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.575685 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.575710 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.576071 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.576113 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.577240 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.577288 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.577309 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.593065 4760 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.230:6443: connect: connection refused" interval="400ms" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.615743 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.615809 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.615851 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.615885 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.615921 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.616045 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.616123 4760 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.616181 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.616231 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.616282 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.616329 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.616417 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.616490 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.616529 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.616569 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.638036 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 
17:03:25.639311 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.639384 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.639412 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.639462 4760 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.640249 4760 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.230:6443: connect: connection refused" node="crc" Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.685071 4760 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.230:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b001d694ca326 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-24 17:03:25.366362918 +0000 UTC m=+0.689244498,LastTimestamp:2025-11-24 17:03:25.366362918 +0000 UTC m=+0.689244498,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718253 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718345 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718388 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718424 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718460 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718523 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718592 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718493 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718618 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718719 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718634 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718772 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718841 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718863 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718876 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718918 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718949 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.718991 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719067 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719102 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719142 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719147 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719174 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719175 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719210 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719497 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719599 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719248 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719100 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.719216 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.841279 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.843193 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.843285 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.843305 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.843349 4760 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.844277 4760 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.230:6443: connect: connection refused" node="crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.913336 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.936636 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.948907 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.960614 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-abbc5ac6a4830495fcd7d18476cbd5cf33e2246cf01ed032b1a022c8ddbf821d WatchSource:0}: Error finding container abbc5ac6a4830495fcd7d18476cbd5cf33e2246cf01ed032b1a022c8ddbf821d: Status 404 returned error can't find the container with id abbc5ac6a4830495fcd7d18476cbd5cf33e2246cf01ed032b1a022c8ddbf821d Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.973914 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.974352 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-fa93cef7fcf34279e0e49cb0ad8bfb71e37a6a2ae3a9241302fed20a26cbafe8 WatchSource:0}: Error finding container fa93cef7fcf34279e0e49cb0ad8bfb71e37a6a2ae3a9241302fed20a26cbafe8: Status 404 returned error can't find the container with id fa93cef7fcf34279e0e49cb0ad8bfb71e37a6a2ae3a9241302fed20a26cbafe8 Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.977856 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-c02b9cb181008edc856efa6478d9a89b628c6b5b7c55b4ce863a3505e2155431 WatchSource:0}: Error finding container c02b9cb181008edc856efa6478d9a89b628c6b5b7c55b4ce863a3505e2155431: Status 404 returned error can't find the container with id c02b9cb181008edc856efa6478d9a89b628c6b5b7c55b4ce863a3505e2155431 Nov 24 17:03:25 crc kubenswrapper[4760]: I1124 17:03:25.991295 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:03:25 crc kubenswrapper[4760]: W1124 17:03:25.992569 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-959c0d5db3e1084513a5236245ba6c228410f9ea0c1dbcacb81c4bd68468758b WatchSource:0}: Error finding container 959c0d5db3e1084513a5236245ba6c228410f9ea0c1dbcacb81c4bd68468758b: Status 404 returned error can't find the container with id 959c0d5db3e1084513a5236245ba6c228410f9ea0c1dbcacb81c4bd68468758b Nov 24 17:03:25 crc kubenswrapper[4760]: E1124 17:03:25.994913 4760 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.230:6443: connect: connection refused" interval="800ms" Nov 24 17:03:26 crc kubenswrapper[4760]: W1124 17:03:26.030569 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-ccd913215a1f29eafcafdb687edd58012baa53527657780d9653465c019981fb WatchSource:0}: Error finding container ccd913215a1f29eafcafdb687edd58012baa53527657780d9653465c019981fb: Status 404 returned error can't find the container with id ccd913215a1f29eafcafdb687edd58012baa53527657780d9653465c019981fb Nov 24 17:03:26 crc kubenswrapper[4760]: W1124 17:03:26.217186 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused Nov 24 17:03:26 crc kubenswrapper[4760]: E1124 17:03:26.217328 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.245428 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.247335 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.247408 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.247446 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.247489 4760 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:03:26 crc kubenswrapper[4760]: E1124 17:03:26.248257 4760 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.230:6443: connect: connection refused" node="crc" Nov 24 17:03:26 crc kubenswrapper[4760]: W1124 17:03:26.339266 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": 
dial tcp 38.102.83.230:6443: connect: connection refused Nov 24 17:03:26 crc kubenswrapper[4760]: E1124 17:03:26.339427 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.370318 4760 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.471173 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ccd913215a1f29eafcafdb687edd58012baa53527657780d9653465c019981fb"} Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.472921 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"959c0d5db3e1084513a5236245ba6c228410f9ea0c1dbcacb81c4bd68468758b"} Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.474540 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c02b9cb181008edc856efa6478d9a89b628c6b5b7c55b4ce863a3505e2155431"} Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.476257 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fa93cef7fcf34279e0e49cb0ad8bfb71e37a6a2ae3a9241302fed20a26cbafe8"} Nov 24 17:03:26 crc kubenswrapper[4760]: I1124 17:03:26.477809 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"abbc5ac6a4830495fcd7d18476cbd5cf33e2246cf01ed032b1a022c8ddbf821d"} Nov 24 17:03:26 crc kubenswrapper[4760]: W1124 17:03:26.616206 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused Nov 24 17:03:26 crc kubenswrapper[4760]: E1124 17:03:26.616360 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:03:26 crc kubenswrapper[4760]: E1124 17:03:26.796523 4760 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.230:6443: connect: connection refused" interval="1.6s" Nov 24 17:03:26 crc kubenswrapper[4760]: W1124 17:03:26.949280 4760 reflector.go:561] 
k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused Nov 24 17:03:26 crc kubenswrapper[4760]: E1124 17:03:26.949383 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.048728 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.050555 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.050610 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.050628 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.050669 4760 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:03:27 crc kubenswrapper[4760]: E1124 17:03:27.051179 4760 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.230:6443: connect: connection refused" node="crc" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.343955 4760 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 24 17:03:27 crc kubenswrapper[4760]: E1124 17:03:27.345712 4760 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.369935 4760 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.486882 4760 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd" exitCode=0 Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.487029 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd"} Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.487097 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.488651 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" 
Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.488722 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.488741 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.491750 4760 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="cf46ec4f474067867bdccbe175bab606e64e52f2be10db61bddc14e50bfb15bf" exitCode=0 Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.491899 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"cf46ec4f474067867bdccbe175bab606e64e52f2be10db61bddc14e50bfb15bf"} Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.492057 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.492415 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.493651 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.493715 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.493737 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.494303 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.494356 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.494376 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.495081 4760 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa" exitCode=0 Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.495202 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa"} Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.495215 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.497144 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.497215 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.497237 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:27 crc kubenswrapper[4760]: 
I1124 17:03:27.501144 4760 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b" exitCode=0 Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.501267 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b"} Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.502157 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.503658 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.503725 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.503744 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.505737 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054"} Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.505803 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb"} Nov 24 17:03:27 crc kubenswrapper[4760]: I1124 17:03:27.505826 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f"} Nov 24 17:03:28 crc kubenswrapper[4760]: W1124 17:03:28.157579 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused Nov 24 17:03:28 crc kubenswrapper[4760]: E1124 17:03:28.157685 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:03:28 crc kubenswrapper[4760]: W1124 17:03:28.282225 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused Nov 24 17:03:28 crc kubenswrapper[4760]: E1124 17:03:28.282352 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get 
\"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.230:6443: connect: connection refused" logger="UnhandledError" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.370145 4760 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.230:6443: connect: connection refused Nov 24 17:03:28 crc kubenswrapper[4760]: E1124 17:03:28.398442 4760 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.230:6443: connect: connection refused" interval="3.2s" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.522335 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8"} Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.522398 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa"} Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.522413 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b"} Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.522431 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443"} Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.525184 4760 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ff15552938376927a600c282a1ca05c3ab2d747030a60fe8ccefd345e8823025" exitCode=0 Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.525281 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ff15552938376927a600c282a1ca05c3ab2d747030a60fe8ccefd345e8823025"} Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.525397 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.526517 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.526543 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.526552 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.528600 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.528996 
4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"33b318e984a01d81c7f58a0baacf4008d87b91864854245c6187122e9666c4f7"} Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.529941 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.529967 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.529980 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.533322 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be"} Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.533354 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5"} Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.533365 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b"} Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.533369 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.534801 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.534857 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.534873 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.537903 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb"} Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.537987 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.538886 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.538928 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.538941 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.651914 4760 kubelet_node_status.go:401] 
"Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.653282 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.653328 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.653338 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.653371 4760 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:03:28 crc kubenswrapper[4760]: E1124 17:03:28.653971 4760 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.230:6443: connect: connection refused" node="crc" Nov 24 17:03:28 crc kubenswrapper[4760]: I1124 17:03:28.991409 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.548666 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5411d3b579052c9456be49de0120bec9eddf1ba24ed431049886fc7e5d41a7e1"} Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.548877 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.551087 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.551144 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.551162 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.554812 4760 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="7ce760eb389545172293592a92fe00d55a20a73c3f751e79c511a185deda8c64" exitCode=0 Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.555049 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.555105 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.555112 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.555222 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.555402 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.555342 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"7ce760eb389545172293592a92fe00d55a20a73c3f751e79c511a185deda8c64"} Nov 24 17:03:29 crc 
kubenswrapper[4760]: I1124 17:03:29.556945 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.556995 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.557040 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.557059 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.557093 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.557110 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.556945 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.557234 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.557264 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.557295 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.557318 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:29 crc kubenswrapper[4760]: I1124 17:03:29.557338 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.564486 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d8d9dc83ddaa0575fcce5c6d52f301cc7cf27f612969d7ebd134b89775d18160"} Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.564570 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f62a95f442d2e2ed0aeeaaaa911d35bbd8ae02e66c883986cf500ca0123faa0b"} Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.564585 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.564610 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.564591 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d9e9519f3d447162a0af5da72c24fb33bb5c0912d055517179bb24be41dc2469"} Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.564685 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.566331 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.566384 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.566404 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.566537 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.566608 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.566627 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.569383 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:30 crc kubenswrapper[4760]: I1124 17:03:30.592429 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.467608 4760 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.575845 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f44d1997fbef9df69ff79f8a46a2435ddcbf206b6e1a9c760413a3b4a709af69"} Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.575938 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9e8d29b195eff9ad4a1455a69a8c416adb7091abeb2df53e2603aa1dfa57a985"} Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.575957 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.576201 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.577108 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.577177 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.577826 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.577886 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.577912 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.578235 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.578290 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:31 crc 
kubenswrapper[4760]: I1124 17:03:31.578303 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.579084 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.579113 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.579124 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.720375 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.855242 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.857316 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.857369 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.857383 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:31 crc kubenswrapper[4760]: I1124 17:03:31.857414 4760 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:03:32 crc kubenswrapper[4760]: I1124 17:03:32.580169 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:03:32 crc kubenswrapper[4760]: I1124 17:03:32.580266 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:32 crc kubenswrapper[4760]: I1124 17:03:32.580197 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:32 crc kubenswrapper[4760]: I1124 17:03:32.582122 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:32 crc kubenswrapper[4760]: I1124 17:03:32.582184 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:32 crc kubenswrapper[4760]: I1124 17:03:32.582250 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:32 crc kubenswrapper[4760]: I1124 17:03:32.582269 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:32 crc kubenswrapper[4760]: I1124 17:03:32.582194 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:32 crc kubenswrapper[4760]: I1124 17:03:32.582395 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:34 crc kubenswrapper[4760]: I1124 17:03:34.430234 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 24 17:03:34 crc kubenswrapper[4760]: I1124 17:03:34.431057 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:34 crc kubenswrapper[4760]: I1124 
17:03:34.432902 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:34 crc kubenswrapper[4760]: I1124 17:03:34.432991 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:34 crc kubenswrapper[4760]: I1124 17:03:34.433043 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:34 crc kubenswrapper[4760]: I1124 17:03:34.737682 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 24 17:03:34 crc kubenswrapper[4760]: I1124 17:03:34.738195 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:34 crc kubenswrapper[4760]: I1124 17:03:34.746733 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:34 crc kubenswrapper[4760]: I1124 17:03:34.746807 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:34 crc kubenswrapper[4760]: I1124 17:03:34.746839 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:35 crc kubenswrapper[4760]: I1124 17:03:35.264376 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:03:35 crc kubenswrapper[4760]: I1124 17:03:35.264669 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:35 crc kubenswrapper[4760]: I1124 17:03:35.266413 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:35 crc kubenswrapper[4760]: I1124 17:03:35.266502 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:35 crc kubenswrapper[4760]: I1124 17:03:35.266529 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:35 crc kubenswrapper[4760]: E1124 17:03:35.550942 4760 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 24 17:03:35 crc kubenswrapper[4760]: I1124 17:03:35.825822 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:35 crc kubenswrapper[4760]: I1124 17:03:35.826169 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:35 crc kubenswrapper[4760]: I1124 17:03:35.827824 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:35 crc kubenswrapper[4760]: I1124 17:03:35.827887 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:35 crc kubenswrapper[4760]: I1124 17:03:35.827905 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.358824 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.359194 4760 kubelet_node_status.go:401] 
"Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.360909 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.360959 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.360976 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.368221 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.593475 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.594960 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.595063 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.595086 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:36 crc kubenswrapper[4760]: I1124 17:03:36.600360 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:37 crc kubenswrapper[4760]: I1124 17:03:37.596895 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:37 crc kubenswrapper[4760]: I1124 17:03:37.598688 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:37 crc kubenswrapper[4760]: I1124 17:03:37.598759 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:37 crc kubenswrapper[4760]: I1124 17:03:37.598790 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:38 crc kubenswrapper[4760]: I1124 17:03:38.539116 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:38 crc kubenswrapper[4760]: I1124 17:03:38.600166 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:38 crc kubenswrapper[4760]: I1124 17:03:38.601933 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:38 crc kubenswrapper[4760]: I1124 17:03:38.602056 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:38 crc kubenswrapper[4760]: I1124 17:03:38.602082 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:39 crc kubenswrapper[4760]: W1124 17:03:39.218210 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 24 
17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.218358 4760 trace.go:236] Trace[382020513]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 17:03:29.216) (total time: 10001ms): Nov 24 17:03:39 crc kubenswrapper[4760]: Trace[382020513]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (17:03:39.218) Nov 24 17:03:39 crc kubenswrapper[4760]: Trace[382020513]: [10.001730952s] [10.001730952s] END Nov 24 17:03:39 crc kubenswrapper[4760]: E1124 17:03:39.218395 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 24 17:03:39 crc kubenswrapper[4760]: W1124 17:03:39.302668 4760 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 24 17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.302789 4760 trace.go:236] Trace[1805585605]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 17:03:29.301) (total time: 10001ms): Nov 24 17:03:39 crc kubenswrapper[4760]: Trace[1805585605]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (17:03:39.302) Nov 24 17:03:39 crc kubenswrapper[4760]: Trace[1805585605]: [10.00156928s] [10.00156928s] END Nov 24 17:03:39 crc kubenswrapper[4760]: E1124 17:03:39.302819 4760 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 24 17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.371042 4760 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 24 17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.605961 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 24 17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.608508 4760 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5411d3b579052c9456be49de0120bec9eddf1ba24ed431049886fc7e5d41a7e1" exitCode=255 Nov 24 17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.608570 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"5411d3b579052c9456be49de0120bec9eddf1ba24ed431049886fc7e5d41a7e1"} Nov 24 17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.608787 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.609922 4760 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.609982 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.610029 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:39 crc kubenswrapper[4760]: I1124 17:03:39.610845 4760 scope.go:117] "RemoveContainer" containerID="5411d3b579052c9456be49de0120bec9eddf1ba24ed431049886fc7e5d41a7e1" Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.170739 4760 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.170857 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.180955 4760 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.181023 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.576113 4760 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]log ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]etcd ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/openshift.io-api-request-count-filter ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/openshift.io-startkubeinformers ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/generic-apiserver-start-informers ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/priority-and-fairness-config-consumer ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/priority-and-fairness-filter ok Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 24 17:03:40 crc kubenswrapper[4760]: 
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/start-apiextensions-informers ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/start-apiextensions-controllers ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/crd-informer-synced ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/start-system-namespaces-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/start-cluster-authentication-info-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/start-legacy-token-tracking-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/start-service-ip-repair-controllers ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Nov 24 17:03:40 crc kubenswrapper[4760]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/priority-and-fairness-config-producer ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/bootstrap-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/start-kube-aggregator-informers ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/apiservice-status-local-available-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/apiservice-status-remote-available-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/apiservice-registration-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/apiservice-wait-for-first-sync ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/apiservice-discovery-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/kube-apiserver-autoregistration ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]autoregister-completion ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/apiservice-openapi-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: [+]poststarthook/apiservice-openapiv3-controller ok
Nov 24 17:03:40 crc kubenswrapper[4760]: livez check failed
Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.576201 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.615387 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.618245 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179"}
Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.618637 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
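[Editor's note] The verbose /livez block above lists one check per line: "[+]" passing, "[-]" failing, and a single failing poststarthook is enough for the overall 500. A small helper sketch (illustrative only) that scans such output and reports just the failures.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// failingChecks returns the "[-]..." lines from verbose livez/healthz output.
func failingChecks(verbose string) []string {
	var failed []string
	sc := bufio.NewScanner(strings.NewReader(verbose))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if strings.HasPrefix(line, "[-]") {
			failed = append(failed, line)
		}
	}
	return failed
}

func main() {
	sample := `[+]ping ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
livez check failed`
	for _, f := range failingChecks(sample) {
		fmt.Println(f)
	}
}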
event="NodeHasSufficientMemory" Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.620294 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:40 crc kubenswrapper[4760]: I1124 17:03:40.620324 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:41 crc kubenswrapper[4760]: I1124 17:03:41.539228 4760 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 24 17:03:41 crc kubenswrapper[4760]: I1124 17:03:41.539356 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 24 17:03:43 crc kubenswrapper[4760]: I1124 17:03:43.434478 4760 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 24 17:03:44 crc kubenswrapper[4760]: I1124 17:03:44.494767 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 24 17:03:44 crc kubenswrapper[4760]: I1124 17:03:44.495043 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:44 crc kubenswrapper[4760]: I1124 17:03:44.496808 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:44 crc kubenswrapper[4760]: I1124 17:03:44.496840 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:44 crc kubenswrapper[4760]: I1124 17:03:44.496852 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:44 crc kubenswrapper[4760]: I1124 17:03:44.517538 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 24 17:03:44 crc kubenswrapper[4760]: I1124 17:03:44.631954 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:44 crc kubenswrapper[4760]: I1124 17:03:44.633567 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:44 crc kubenswrapper[4760]: I1124 17:03:44.633752 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:44 crc kubenswrapper[4760]: I1124 17:03:44.633927 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:45 crc kubenswrapper[4760]: E1124 17:03:45.163236 4760 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.177570 4760 trace.go:236] Trace[2102873302]: "Reflector ListAndWatch" 
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.177570 4760 trace.go:236] Trace[2102873302]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 17:03:32.224) (total time: 12953ms):
Nov 24 17:03:45 crc kubenswrapper[4760]: Trace[2102873302]: ---"Objects listed" error: 12953ms (17:03:45.177)
Nov 24 17:03:45 crc kubenswrapper[4760]: Trace[2102873302]: [12.953468406s] [12.953468406s] END
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.177926 4760 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.182949 4760 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.183197 4760 trace.go:236] Trace[1452331703]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 17:03:33.042) (total time: 12140ms):
Nov 24 17:03:45 crc kubenswrapper[4760]: Trace[1452331703]: ---"Objects listed" error: 12140ms (17:03:45.183)
Nov 24 17:03:45 crc kubenswrapper[4760]: Trace[1452331703]: [12.140940091s] [12.140940091s] END
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.183227 4760 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.188131 4760 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Nov 24 17:03:45 crc kubenswrapper[4760]: E1124 17:03:45.188607 4760 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.211328 4760 csr.go:261] certificate signing request csr-v98fz is approved, waiting to be issued
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.221528 4760 csr.go:257] certificate signing request csr-v98fz is issued
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.385884 4760 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.580492 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.581680 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.593378 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.636315 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log"
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.637162 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.638807 4760 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179" exitCode=255
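[Editor's note] The csr.go entries show the two distinct stages of kubelet client-certificate bootstrapping: csr-v98fz first gains a CertificateApproved condition ("approved, waiting to be issued"), then the signer fills in status.certificate ("issued"). A sketch of checking those states with client-go; the CSR name comes from the log, everything else is illustrative.

package main

import (
	"context"
	"fmt"

	certsv1 "k8s.io/api/certificates/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func csrState(cs kubernetes.Interface, name string) (string, error) {
	csr, err := cs.CertificatesV1().CertificateSigningRequests().
		Get(context.Background(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	approved := false
	for _, c := range csr.Status.Conditions {
		if c.Type == certsv1.CertificateApproved {
			approved = true
		}
	}
	switch {
	case approved && len(csr.Status.Certificate) > 0:
		return "issued", nil
	case approved:
		return "approved, waiting to be issued", nil
	default:
		return "pending", nil
	}
}

func main() { fmt.Println("inspect e.g. csr-v98fz with csrState") }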
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179"} Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.638922 4760 scope.go:117] "RemoveContainer" containerID="5411d3b579052c9456be49de0120bec9eddf1ba24ed431049886fc7e5d41a7e1" Nov 24 17:03:45 crc kubenswrapper[4760]: E1124 17:03:45.656621 4760 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:45 crc kubenswrapper[4760]: I1124 17:03:45.657062 4760 scope.go:117] "RemoveContainer" containerID="344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179" Nov 24 17:03:45 crc kubenswrapper[4760]: E1124 17:03:45.657367 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.223890 4760 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-11-24 16:58:45 +0000 UTC, rotation deadline is 2026-08-26 23:21:32.182628369 +0000 UTC Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.223937 4760 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6606h17m45.958693966s for next certificate rotation Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.365422 4760 apiserver.go:52] "Watching apiserver" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.375768 4760 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.376617 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-49579","openshift-multus/multus-additional-cni-plugins-v5p49","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-operator/iptables-alerter-4ln5h","openshift-kube-apiserver/kube-apiserver-crc","openshift-machine-config-operator/machine-config-daemon-vgbxz","openshift-multus/multus-8x59s","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/network-operator-58b4c7f79c-55gtf"] Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.377230 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.377420 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.377508 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.377663 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.377710 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.378018 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.378097 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.378129 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-49579" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.378247 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.378390 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.378893 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.378973 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.379685 4760 util.go:30] "No sandbox for pod can be found. 
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.379685 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.383044 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.383125 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.384791 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.384938 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.385151 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.385362 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.385446 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.385773 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.385780 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.385859 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.386025 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.386380 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.386669 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.386814 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.386998 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.387154 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.387278 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.387430 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.387528 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.387565 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.387727 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.387892 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.388072 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.388219 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.392748 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-cni-binary-copy\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393477 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-var-lib-kubelet\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393449 4760 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393589 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-run-multus-certs\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393625 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-hostroot\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393649 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-daemon-config\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393672 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-run-netns\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393875 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-var-lib-cni-multus\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393918 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lghj\" (UniqueName: \"kubernetes.io/projected/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-kube-api-access-7lghj\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393943 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393966 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.393983 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-system-cni-dir\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.394014 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-cnibin\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.394030 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.394061 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.394080 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-etc-kubernetes\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.394098 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-run-k8s-cni-cncf-io\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.394135 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-conf-dir\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.394160 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-socket-dir-parent\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.394240 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-var-lib-cni-bin\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.394327 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-cni-dir\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.394374 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-os-release\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.395226 4760 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.402314 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.403034 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.408886 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.408936 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.408954 4760 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.409086 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:46.909056112 +0000 UTC m=+22.231937682 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.411340 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
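[Editor's note] The nestedpendingoperations entry encodes the volume retry policy: a failed MountVolume.SetUp may not be retried before now+durationBeforeRetry, and the wait grows roughly exponentially on repeated failures. A sketch of that bookkeeping; the 500ms base matches the log, while the cap here is an assumption for illustration.

package main

import (
	"fmt"
	"time"
)

type pendingOp struct {
	failures       int
	notBefore      time.Time
	durationBefore time.Duration
}

// recordFailure advances the backoff: 500ms on the first failure,
// doubling afterwards up to an assumed cap.
func (op *pendingOp) recordFailure(now time.Time) {
	const maxBackoff = 2 * time.Minute // assumed cap
	if op.durationBefore == 0 {
		op.durationBefore = 500 * time.Millisecond // initial backoff from the log
	} else if op.durationBefore *= 2; op.durationBefore > maxBackoff {
		op.durationBefore = maxBackoff
	}
	op.failures++
	op.notBefore = now.Add(op.durationBefore)
}

func main() {
	op := &pendingOp{}
	now := time.Now()
	for i := 0; i < 4; i++ {
		op.recordFailure(now)
		fmt.Printf("failure %d: no retries permitted for %s\n", op.failures, op.durationBefore)
	}
}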
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.426201 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.442057 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.456900 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5411d3b579052c9456be49de0120bec9eddf1ba24ed431049886fc7e5d41a7e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:39Z\\\",\\\"message\\\":\\\"W1124 17:03:28.738418 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1124 17:03:28.739104 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764003808 cert, and key in /tmp/serving-cert-1369365578/serving-signer.crt, /tmp/serving-cert-1369365578/serving-signer.key\\\\nI1124 17:03:29.116761 1 observer_polling.go:159] Starting file observer\\\\nW1124 17:03:29.121838 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1124 17:03:29.122093 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:29.125457 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1369365578/tls.crt::/tmp/serving-cert-1369365578/tls.key\\\\\\\"\\\\nF1124 17:03:39.461521 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating 
requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.471420 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.494876 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.494949 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495050 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495070 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495348 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495377 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495373 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495464 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495637 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495705 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495821 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495942 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495966 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.495997 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496035 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496070 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496154 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496227 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496306 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496324 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496355 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496391 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496419 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496434 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496447 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496479 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496517 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496588 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496599 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496615 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496641 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496662 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496668 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496705 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496728 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496748 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496770 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496815 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496822 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496857 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496885 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496908 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496912 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496918 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496930 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496971 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.496992 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497019 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497030 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497050 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497066 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497081 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497097 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497114 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497130 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497147 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497167 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497187 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497203 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497219 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497241 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497261 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497286 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497310 4760 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497313 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497353 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497380 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497398 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497418 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497436 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497459 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497478 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497496 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 24 17:03:46 crc 
kubenswrapper[4760]: I1124 17:03:46.497516 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497518 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497533 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497554 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497574 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497597 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497641 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497572 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.497609 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.498369 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.498463 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.498689 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.498715 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.498985 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.499267 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.499320 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.499457 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.499515 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.499670 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.499811 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.499855 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.499875 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.499878 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.499969 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.500066 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.500377 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.500466 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.500648 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.500728 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.500735 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.500784 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.500805 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.500916 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.501076 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.501186 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.502119 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.501446 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.501647 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.502194 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.501630 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.502360 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.502397 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.502425 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.502506 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.502653 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.504834 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.504921 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.504945 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.504965 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.505077 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.505136 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.505307 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.505615 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.502908 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.503001 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.503074 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.503269 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.503487 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.503505 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.505702 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.503593 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.503623 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.504038 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.504586 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.505046 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.505996 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.506380 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.506493 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.506546 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.506743 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.505583 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.505642 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.507240 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510213 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510271 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510308 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510338 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510367 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510397 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510425 
4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510452 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510477 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510506 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510534 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510564 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510593 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510624 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510654 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510680 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 
17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510704 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510735 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510731 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.511241 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.511285 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.511327 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.510766 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.511796 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512030 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512119 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512202 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512248 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512296 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512350 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512375 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512395 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512464 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512507 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512544 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512680 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512717 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512755 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512780 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512809 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512839 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512866 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512896 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512923 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512952 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512979 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513030 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513060 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513039 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513087 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513117 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513146 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513177 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513206 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513234 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513258 4760 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513284 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513311 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513336 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513363 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513390 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513413 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513438 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513461 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513491 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513519 4760 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513546 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513579 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513604 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513632 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513659 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513696 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513734 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513758 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513782 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513806 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513832 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513857 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513881 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513905 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513929 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513953 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513976 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514019 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514045 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod 
\"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514094 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514121 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514146 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514172 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514196 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514223 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514248 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514275 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514301 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514327 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514355 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514383 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514410 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514438 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514467 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514496 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514526 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514556 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514587 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514616 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514646 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514673 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514699 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514724 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514755 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514781 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514809 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514837 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514862 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514885 4760 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514912 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515000 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515044 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515070 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515096 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515119 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515228 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515260 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515288 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 24 17:03:46 crc 
kubenswrapper[4760]: I1124 17:03:46.515314 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515409 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-hostroot\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515440 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-daemon-config\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515467 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-proxy-tls\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515490 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jz5l\" (UniqueName: \"kubernetes.io/projected/15207e5d-cdbd-432f-bef7-cfb6992808f5-kube-api-access-6jz5l\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515512 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-run-netns\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515532 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lghj\" (UniqueName: \"kubernetes.io/projected/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-kube-api-access-7lghj\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515555 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/15207e5d-cdbd-432f-bef7-cfb6992808f5-cni-binary-copy\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515576 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-system-cni-dir\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515619 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515641 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-mcd-auth-proxy-config\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515658 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-os-release\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515683 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-run-k8s-cni-cncf-io\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515705 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-conf-dir\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515727 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-tuning-conf-dir\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515759 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-socket-dir-parent\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515782 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515803 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " 
pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515827 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-cni-binary-copy\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515851 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-rootfs\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515872 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/15207e5d-cdbd-432f-bef7-cfb6992808f5-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515897 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515920 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-run-multus-certs\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515941 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzjjg\" (UniqueName: \"kubernetes.io/projected/7220d5de-3096-474d-af9b-1276a2e41bd0-kube-api-access-bzjjg\") pod \"node-resolver-49579\" (UID: \"7220d5de-3096-474d-af9b-1276a2e41bd0\") " pod="openshift-dns/node-resolver-49579" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515962 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-var-lib-cni-multus\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516030 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/7220d5de-3096-474d-af9b-1276a2e41bd0-hosts-file\") pod \"node-resolver-49579\" (UID: \"7220d5de-3096-474d-af9b-1276a2e41bd0\") " pod="openshift-dns/node-resolver-49579" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516051 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod 
\"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516074 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwqwg\" (UniqueName: \"kubernetes.io/projected/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-kube-api-access-rwqwg\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516097 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-cnibin\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516117 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516140 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-cnibin\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516164 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516186 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-etc-kubernetes\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516206 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-system-cni-dir\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516229 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516250 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-cni-dir\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516270 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-os-release\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516291 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-var-lib-cni-bin\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516311 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516334 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516355 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-var-lib-kubelet\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516377 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516491 4760 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516505 4760 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516517 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516530 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516543 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516555 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516565 4760 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516577 4760 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516588 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516598 4760 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516609 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516619 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516630 4760 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516639 4760 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516651 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516661 4760 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516671 4760 
reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516681 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516691 4760 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516702 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516713 4760 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516726 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516736 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516747 4760 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516760 4760 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516771 4760 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516781 4760 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516791 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516801 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc 
kubenswrapper[4760]: I1124 17:03:46.516812 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516823 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516833 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516844 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516857 4760 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516867 4760 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516877 4760 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516887 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516898 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516910 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516921 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516933 4760 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516943 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 
24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516954 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516966 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516977 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516987 4760 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516997 4760 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517021 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517032 4760 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517042 4760 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517052 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517063 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517073 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517084 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517096 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517106 4760 
reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517116 4760 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517126 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517140 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517150 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517161 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517171 4760 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517184 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517195 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517207 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517218 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517228 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517238 4760 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517249 4760 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517258 4760 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517268 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517278 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517289 4760 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517299 4760 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.518228 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.518466 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.518558 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-run-k8s-cni-cncf-io\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.518591 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-conf-dir\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.518730 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-socket-dir-parent\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.518774 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"cnibin\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-cnibin\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.518814 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.518924 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.519219 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-var-lib-cni-bin\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.519287 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-os-release\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.519359 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-etc-kubernetes\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.519408 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-var-lib-kubelet\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.521237 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-run-multus-certs\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.521311 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-var-lib-cni-multus\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.521821 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-cni-dir\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 
24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512623 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.512818 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.513833 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.514981 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515461 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515729 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.515871 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516050 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516171 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516670 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516876 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.516942 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517477 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.517504 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.518662 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.518687 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). 
InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.518776 4760 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.527949 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:47.027888931 +0000 UTC m=+22.350770481 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.528457 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.528821 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-cni-binary-copy\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.528926 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.529176 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.529176 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.529558 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.519386 4760 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.521765 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.529299 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.529364 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.529714 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.529819 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.529628 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.530025 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.530188 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.530401 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-multus-daemon-config\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.530381 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.529520 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.519266 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.530524 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.530619 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:47.030564397 +0000 UTC m=+22.353445947 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.530675 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-host-run-netns\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.530677 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.530851 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.531135 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.531143 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.531228 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.531419 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-system-cni-dir\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.531528 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:03:47.031516954 +0000 UTC m=+22.354398504 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.531683 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.531838 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.531873 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.531903 4760 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.531949 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-hostroot\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.531991 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:47.031962246 +0000 UTC m=+22.354843826 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.532040 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.532349 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.532549 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.533154 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.533878 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.534247 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.534526 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.534669 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.534778 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.535709 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.535976 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.536111 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.536278 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.539202 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.540915 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.544136 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.544326 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.544486 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.544989 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.545336 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.545383 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.545550 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.545748 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.546045 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.546181 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.546228 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.546252 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.546855 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.547516 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.547585 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.547936 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.548092 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.548137 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.548143 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.548153 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.548098 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.547367 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.548491 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.549104 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.549403 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.548775 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.550745 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.551330 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lghj\" (UniqueName: \"kubernetes.io/projected/ea01e72c-3c1c-465f-a4cb-90eb34c2f871-kube-api-access-7lghj\") pod \"multus-8x59s\" (UID: \"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\") " pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.554229 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.554432 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.554786 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.555270 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.555990 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.556898 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.557081 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.557185 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.557438 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). 
InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.557623 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.557825 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.557880 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.558041 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.558038 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.558145 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.558659 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.558660 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.559090 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.559825 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.560087 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.560483 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.560521 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.560880 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.561020 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.561231 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.561293 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.561695 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.561956 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.562141 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.562438 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.562473 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.562754 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.562874 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.563569 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.563307 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.564250 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.564712 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.565296 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.565862 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.566147 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.571343 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.573343 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.581442 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.585348 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.592164 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-t55f2"] Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.593551 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.596203 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.597168 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.597414 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.597551 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.597663 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.597869 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.597897 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.598108 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.598156 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.608049 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.618308 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.618650 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.618721 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-netns\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.618768 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-var-lib-openvswitch\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.618812 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-rootfs\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.618848 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/15207e5d-cdbd-432f-bef7-cfb6992808f5-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-v5p49\" (UID: 
\"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.618883 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzjjg\" (UniqueName: \"kubernetes.io/projected/7220d5de-3096-474d-af9b-1276a2e41bd0-kube-api-access-bzjjg\") pod \"node-resolver-49579\" (UID: \"7220d5de-3096-474d-af9b-1276a2e41bd0\") " pod="openshift-dns/node-resolver-49579" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.618916 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-env-overrides\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.618946 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85wkh\" (UniqueName: \"kubernetes.io/projected/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-kube-api-access-85wkh\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.618983 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/7220d5de-3096-474d-af9b-1276a2e41bd0-hosts-file\") pod \"node-resolver-49579\" (UID: \"7220d5de-3096-474d-af9b-1276a2e41bd0\") " pod="openshift-dns/node-resolver-49579" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619035 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619065 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwqwg\" (UniqueName: \"kubernetes.io/projected/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-kube-api-access-rwqwg\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619094 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-systemd-units\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619127 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-cnibin\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619168 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-etc-openvswitch\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619204 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-system-cni-dir\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619234 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-slash\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619263 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-openvswitch\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619290 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-log-socket\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619348 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-bin\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619377 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-ovn\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619402 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-node-log\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619426 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-ovn-kubernetes\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619451 4760 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-netd\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619481 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovn-node-metrics-cert\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619514 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-proxy-tls\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619549 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jz5l\" (UniqueName: \"kubernetes.io/projected/15207e5d-cdbd-432f-bef7-cfb6992808f5-kube-api-access-6jz5l\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619579 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-kubelet\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619612 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-script-lib\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619636 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-cnibin\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619664 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-mcd-auth-proxy-config\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.619755 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-system-cni-dir\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " 
pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.620203 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/7220d5de-3096-474d-af9b-1276a2e41bd0-hosts-file\") pod \"node-resolver-49579\" (UID: \"7220d5de-3096-474d-af9b-1276a2e41bd0\") " pod="openshift-dns/node-resolver-49579" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.620223 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-os-release\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.620325 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-os-release\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.620519 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.620550 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-mcd-auth-proxy-config\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.620746 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/15207e5d-cdbd-432f-bef7-cfb6992808f5-cni-binary-copy\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.620951 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-rootfs\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.621428 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-systemd\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.621649 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/15207e5d-cdbd-432f-bef7-cfb6992808f5-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-v5p49\" (UID: 
\"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.621703 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-tuning-conf-dir\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.621761 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-config\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.622249 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/15207e5d-cdbd-432f-bef7-cfb6992808f5-cni-binary-copy\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.622549 4760 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.622660 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.622763 4760 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.622854 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.622937 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623046 4760 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623148 4760 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623227 4760 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623306 4760 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623394 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623481 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623558 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623639 4760 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623737 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623828 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623906 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.623998 4760 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624100 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624179 4760 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624254 4760 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624345 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624422 4760 reconciler_common.go:293] "Volume 
detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624499 4760 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624581 4760 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624678 4760 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624774 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624880 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624996 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.625133 4760 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.625243 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.625353 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.625486 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.625602 4760 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.625726 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.625820 4760 reconciler_common.go:293] "Volume 
detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.625917 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.626025 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.626148 4760 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.626234 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.626320 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.626415 4760 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.626510 4760 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.626611 4760 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.626725 4760 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.626844 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.626956 4760 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.627079 4760 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.627174 4760 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.627273 4760 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.627372 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.627461 4760 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.627550 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.627647 4760 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.627750 4760 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.627849 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.627942 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628062 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628153 4760 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628239 4760 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628349 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628441 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: 
\"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628531 4760 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628642 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628755 4760 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628846 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628931 4760 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629034 4760 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629130 4760 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629215 4760 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629290 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629385 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629470 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629564 4760 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629657 4760 reconciler_common.go:293] "Volume detached for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629753 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629857 4760 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629965 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.630160 4760 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.630280 4760 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.630377 4760 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.630497 4760 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.630597 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.630718 4760 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.630805 4760 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.630900 4760 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.630981 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.631105 4760 reconciler_common.go:293] "Volume detached for volume 
\"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.631215 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.631305 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.631396 4760 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.631488 4760 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.631605 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.631738 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.631847 4760 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.631935 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.632076 4760 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.632169 4760 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.632248 4760 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.632343 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.632441 4760 reconciler_common.go:293] 
"Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.632533 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.632615 4760 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.632717 4760 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.632829 4760 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.632957 4760 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.633111 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.633229 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.633351 4760 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.633463 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.633576 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.633705 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.633828 4760 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.633948 4760 reconciler_common.go:293] "Volume detached for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.634119 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.628301 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/15207e5d-cdbd-432f-bef7-cfb6992808f5-tuning-conf-dir\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.634244 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.634370 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.634387 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.634400 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.634412 4760 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.629479 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.634425 4760 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.624881 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-proxy-tls\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.635895 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwqwg\" (UniqueName: \"kubernetes.io/projected/f71fb2ac-0373-4606-a20a-0b60ca26fbc3-kube-api-access-rwqwg\") pod \"machine-config-daemon-vgbxz\" (UID: \"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\") " pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.637240 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzjjg\" (UniqueName: \"kubernetes.io/projected/7220d5de-3096-474d-af9b-1276a2e41bd0-kube-api-access-bzjjg\") pod \"node-resolver-49579\" (UID: \"7220d5de-3096-474d-af9b-1276a2e41bd0\") " pod="openshift-dns/node-resolver-49579" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.637307 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jz5l\" (UniqueName: \"kubernetes.io/projected/15207e5d-cdbd-432f-bef7-cfb6992808f5-kube-api-access-6jz5l\") pod \"multus-additional-cni-plugins-v5p49\" (UID: \"15207e5d-cdbd-432f-bef7-cfb6992808f5\") " pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.648913 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.653777 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.663152 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.663575 4760 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.663892 4760 scope.go:117] "RemoveContainer" containerID="344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.664146 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.675692 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.685122 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.702662 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller 
ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin
\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.704735 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 17:03:46 crc kubenswrapper[4760]: W1124 17:03:46.719242 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-387df0ae60a56e05a3ac5cf1c54b744cd35cc78015d1c125ebb913ab70e98b21 WatchSource:0}: Error finding container 387df0ae60a56e05a3ac5cf1c54b744cd35cc78015d1c125ebb913ab70e98b21: Status 404 returned error can't find the container with id 387df0ae60a56e05a3ac5cf1c54b744cd35cc78015d1c125ebb913ab70e98b21 Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.721686 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5411d3b579052c9456be49de0120bec9eddf1ba24ed431049886fc7e5d41a7e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:39Z\\\",\\\"message\\\":\\\"W1124 17:03:28.738418 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1124 17:03:28.739104 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764003808 cert, and key in /tmp/serving-cert-1369365578/serving-signer.crt, /tmp/serving-cert-1369365578/serving-signer.key\\\\nI1124 17:03:29.116761 1 observer_polling.go:159] Starting file observer\\\\nW1124 17:03:29.121838 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1124 17:03:29.122093 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:29.125457 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1369365578/tls.crt::/tmp/serving-cert-1369365578/tls.key\\\\\\\"\\\\nF1124 17:03:39.461521 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735056 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735351 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735385 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-netns\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735408 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-var-lib-openvswitch\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735438 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-env-overrides\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735460 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85wkh\" (UniqueName: \"kubernetes.io/projected/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-kube-api-access-85wkh\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735487 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-netns\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735546 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735597 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-systemd-units\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735684 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-etc-openvswitch\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735711 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-slash\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735731 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-openvswitch\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735752 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-log-socket\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735783 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-bin\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735805 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-ovn\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735823 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-node-log\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735829 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-openvswitch\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735843 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-ovn-kubernetes\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735864 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-netd\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735887 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovn-node-metrics-cert\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735892 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-slash\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735910 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-kubelet\") pod \"ovnkube-node-t55f2\" (UID: 
\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735929 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-node-log\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735930 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-script-lib\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735992 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-systemd\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736046 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-config\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736162 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-bin\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736191 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-log-socket\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.735864 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-etc-openvswitch\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736279 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-env-overrides\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736339 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-ovn-kubernetes\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 
17:03:46.736351 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-var-lib-openvswitch\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736374 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-netd\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736740 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-systemd-units\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736407 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-systemd\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736471 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-kubelet\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736628 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-config\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736632 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-script-lib\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.736397 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-ovn\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.737465 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.740265 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovn-node-metrics-cert\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.748227 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.751933 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: W1124 17:03:46.752871 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-29ab3273dad67ebc058169fd650e7f15bbfd431604856798217e36a224de0920 WatchSource:0}: Error finding container 29ab3273dad67ebc058169fd650e7f15bbfd431604856798217e36a224de0920: Status 404 returned error can't find the container with id 29ab3273dad67ebc058169fd650e7f15bbfd431604856798217e36a224de0920 Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.758815 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-49579" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.761442 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85wkh\" (UniqueName: \"kubernetes.io/projected/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-kube-api-access-85wkh\") pod \"ovnkube-node-t55f2\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.766353 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-8x59s" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.775976 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-v5p49" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.776658 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: W1124 17:03:46.777323 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-ba2094d5d80b23447a571bc7b124a2eb488976d47ed97efefd7033a21a42b287 WatchSource:0}: Error finding container ba2094d5d80b23447a571bc7b124a2eb488976d47ed97efefd7033a21a42b287: Status 404 returned error can't find the container with id ba2094d5d80b23447a571bc7b124a2eb488976d47ed97efefd7033a21a42b287 Nov 24 
17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.783462 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.802195 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"en
v-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: W1124 17:03:46.808641 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7220d5de_3096_474d_af9b_1276a2e41bd0.slice/crio-d2a68678ca9607156fc0759b97acd62b0a069d4d728be567362cf30e55c24194 WatchSource:0}: Error finding container d2a68678ca9607156fc0759b97acd62b0a069d4d728be567362cf30e55c24194: Status 404 returned error can't find the container with id d2a68678ca9607156fc0759b97acd62b0a069d4d728be567362cf30e55c24194 Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.815686 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: W1124 17:03:46.822899 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod15207e5d_cdbd_432f_bef7_cfb6992808f5.slice/crio-45bf40bcd0d670c2be7f103ae71ae1347fc818689afd7232be209a69c3a965c7 WatchSource:0}: Error finding container 45bf40bcd0d670c2be7f103ae71ae1347fc818689afd7232be209a69c3a965c7: Status 404 returned error can't find the container with id 45bf40bcd0d670c2be7f103ae71ae1347fc818689afd7232be209a69c3a965c7 Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.828804 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.841189 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.854640 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.883953 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.896176 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with 
unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.907101 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.907628 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.918277 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.937188 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.939198 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.939410 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.939461 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.939483 4760 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:46 crc kubenswrapper[4760]: E1124 17:03:46.939560 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:47.939535712 +0000 UTC m=+23.262417262 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:46 crc kubenswrapper[4760]: W1124 17:03:46.946416 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1ccc7f2_1c1b_42b4_aac5_a9865757a92b.slice/crio-f0d515499e3c7cabe4e35991e6813204363fa60080f8c63c2d36630e178aa82f WatchSource:0}: Error finding container f0d515499e3c7cabe4e35991e6813204363fa60080f8c63c2d36630e178aa82f: Status 404 returned error can't find the container with id f0d515499e3c7cabe4e35991e6813204363fa60080f8c63c2d36630e178aa82f Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.950548 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:46 crc kubenswrapper[4760]: I1124 17:03:46.977759 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.040819 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.040961 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.040988 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.041203 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: 
\"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.041266 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:03:48.041210924 +0000 UTC m=+23.364092474 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.041352 4760 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.041405 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:48.041391349 +0000 UTC m=+23.364272899 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.041531 4760 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.041643 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:48.041621895 +0000 UTC m=+23.364503525 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.042313 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.042333 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.042347 4760 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.042405 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:48.042394347 +0000 UTC m=+23.365275887 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.466364 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.466960 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.470780 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.471664 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.472943 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.473624 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.474678 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.475236 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.475887 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.477070 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.477671 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.478727 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.479301 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.480428 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.480958 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.481557 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.482544 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.483086 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.484020 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.484445 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.485051 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.486113 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.486570 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.487620 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.488318 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.489424 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.490060 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.490823 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.492135 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.492698 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.494023 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.494592 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.495617 4760 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.495739 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.497491 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.498669 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.499163 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.501019 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.501668 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.502618 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.503395 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.504528 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.505130 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.506374 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" 
path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.507088 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.508375 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.508851 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.509812 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.510476 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.511863 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.512343 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.513249 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.513739 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.514730 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.515315 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.515763 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.663689 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.663750 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.663767 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"50290f06b193327435b90947ae60e62dfb279a6917c670d32c6f46ef150ee0e4"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.667952 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8x59s" event={"ID":"ea01e72c-3c1c-465f-a4cb-90eb34c2f871","Type":"ContainerStarted","Data":"ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.668061 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8x59s" event={"ID":"ea01e72c-3c1c-465f-a4cb-90eb34c2f871","Type":"ContainerStarted","Data":"772c4da586aeaa5efbd1d4bd187357f6c737b078360ffdc294f407b85d4bca2a"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.671320 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.671370 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.671381 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"29ab3273dad67ebc058169fd650e7f15bbfd431604856798217e36a224de0920"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.675382 4760 generic.go:334] "Generic (PLEG): container finished" podID="15207e5d-cdbd-432f-bef7-cfb6992808f5" containerID="1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf" exitCode=0 Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.675521 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" event={"ID":"15207e5d-cdbd-432f-bef7-cfb6992808f5","Type":"ContainerDied","Data":"1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.675563 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" event={"ID":"15207e5d-cdbd-432f-bef7-cfb6992808f5","Type":"ContainerStarted","Data":"45bf40bcd0d670c2be7f103ae71ae1347fc818689afd7232be209a69c3a965c7"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.677483 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-49579" event={"ID":"7220d5de-3096-474d-af9b-1276a2e41bd0","Type":"ContainerStarted","Data":"85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.677509 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-dns/node-resolver-49579" event={"ID":"7220d5de-3096-474d-af9b-1276a2e41bd0","Type":"ContainerStarted","Data":"d2a68678ca9607156fc0759b97acd62b0a069d4d728be567362cf30e55c24194"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.679415 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ba2094d5d80b23447a571bc7b124a2eb488976d47ed97efefd7033a21a42b287"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.681321 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.681401 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"387df0ae60a56e05a3ac5cf1c54b744cd35cc78015d1c125ebb913ab70e98b21"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.683301 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4" exitCode=0 Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.683430 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.683496 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"f0d515499e3c7cabe4e35991e6813204363fa60080f8c63c2d36630e178aa82f"} Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.683992 4760 scope.go:117] "RemoveContainer" containerID="344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179" Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.684184 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.686812 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.700509 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.717963 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.732664 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.744207 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-confi
g-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.765598 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.781354 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.791410 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.803471 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.818086 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.830558 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.843060 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.864751 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:47Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.877891 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:47Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.895311 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{
\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:47Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.908722 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/va
r/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:47Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.952199 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.952448 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.952493 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.952506 4760 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:47 crc kubenswrapper[4760]: E1124 17:03:47.952581 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:49.952556602 +0000 UTC m=+25.275438152 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:47 crc kubenswrapper[4760]: I1124 17:03:47.953117 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:47Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:47 crc 
kubenswrapper[4760]: I1124 17:03:47.977224 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"Po
dInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:47Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.001975 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:47Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.016164 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.029935 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.048427 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.052779 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.052985 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" 
(UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.053067 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.053113 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:03:50.053081363 +0000 UTC m=+25.375962913 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.053167 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.053241 4760 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.053324 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:50.053306189 +0000 UTC m=+25.376187799 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.053363 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.053380 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.053392 4760 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.053438 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:50.053427262 +0000 UTC m=+25.376308812 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.053481 4760 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.053505 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:50.053498814 +0000 UTC m=+25.376380354 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.070443 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.083368 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.465495 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.465591 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.466187 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:03:48 crc kubenswrapper[4760]: E1124 17:03:48.466275 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.545278 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.551709 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.556968 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.569381 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.587074 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.603652 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.617395 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.629828 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.646805 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.664568 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.689051 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685"} Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.689108 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7"} Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.689119 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0"} Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.689129 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1"} Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.690704 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\"
:\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.690872 4760 generic.go:334] "Generic (PLEG): container finished" podID="15207e5d-cdbd-432f-bef7-cfb6992808f5" containerID="c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748" exitCode=0 Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.690899 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" event={"ID":"15207e5d-cdbd-432f-bef7-cfb6992808f5","Type":"ContainerDied","Data":"c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748"} Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.714606 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc 
kubenswrapper[4760]: I1124 17:03:48.734902 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"Po
dInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.754456 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.769021 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.787380 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.829885 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.854717 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef
335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.875334 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.901353 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.929637 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z 
is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.945756 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.958980 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.976932 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:48 crc kubenswrapper[4760]: I1124 17:03:48.991513 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:48Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.009398 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.020747 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.041721 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.465498 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:49 crc kubenswrapper[4760]: E1124 17:03:49.466345 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.697903 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599"} Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.697958 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb"} Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.700358 4760 generic.go:334] "Generic (PLEG): container finished" podID="15207e5d-cdbd-432f-bef7-cfb6992808f5" containerID="4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53" exitCode=0 Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.700398 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" event={"ID":"15207e5d-cdbd-432f-bef7-cfb6992808f5","Type":"ContainerDied","Data":"4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53"} Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.715596 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.733725 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.758370 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.776117 4760 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.791116 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.802956 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef
335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.815204 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.829083 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.850855 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z 
is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.873622 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.887744 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.897606 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.908450 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:49Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:49 crc kubenswrapper[4760]: I1124 17:03:49.978687 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:49 crc kubenswrapper[4760]: E1124 17:03:49.979039 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:03:49 crc kubenswrapper[4760]: E1124 17:03:49.979088 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:03:49 crc kubenswrapper[4760]: E1124 17:03:49.979111 4760 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:49 crc kubenswrapper[4760]: E1124 17:03:49.979208 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:53.979180632 +0000 UTC m=+29.302062222 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.079319 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.079493 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.079529 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:03:54.079503006 +0000 UTC m=+29.402384566 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.079591 4760 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.079598 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.079637 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:54.07962476 +0000 UTC m=+29.402506320 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.079696 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.079843 4760 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.079944 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.079996 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:54.079963029 +0000 UTC m=+29.402844619 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.080039 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.080096 4760 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.080236 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:03:54.080174995 +0000 UTC m=+29.403056715 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.465386 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.465444 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.465587 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:03:50 crc kubenswrapper[4760]: E1124 17:03:50.465738 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.710119 4760 generic.go:334] "Generic (PLEG): container finished" podID="15207e5d-cdbd-432f-bef7-cfb6992808f5" containerID="0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57" exitCode=0 Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.710249 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" event={"ID":"15207e5d-cdbd-432f-bef7-cfb6992808f5","Type":"ContainerDied","Data":"0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57"} Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.712076 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb"} Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.749191 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mount
Path\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.773276 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.795178 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.816057 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.839212 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},
{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.857537 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.872517 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.891514 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.911149 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.928761 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.944355 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:50 crc kubenswrapper[4760]: I1124 17:03:50.972183 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.003563 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:50Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.023970 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.044089 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.060131 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.074205 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.088463 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.101983 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.113928 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.125095 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.146812 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.167529 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.182653 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.183087 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.184390 4760 scope.go:117] "RemoveContainer" containerID="344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179" Nov 24 17:03:51 crc kubenswrapper[4760]: E1124 17:03:51.184683 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.199754 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.225394 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z 
is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.466539 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:51 crc kubenswrapper[4760]: E1124 17:03:51.466755 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.526625 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-vx8zv"] Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.527793 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.535231 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.535233 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.538115 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.538252 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.552057 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.568570 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.586645 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.589621 4760 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.592279 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.592346 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.592364 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.592537 4760 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.598620 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8pkk\" (UniqueName: \"kubernetes.io/projected/98ad2e16-05ae-4094-93ad-d636fdbfecaf-kube-api-access-h8pkk\") pod \"node-ca-vx8zv\" (UID: \"98ad2e16-05ae-4094-93ad-d636fdbfecaf\") " pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.599940 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/98ad2e16-05ae-4094-93ad-d636fdbfecaf-serviceca\") pod \"node-ca-vx8zv\" (UID: \"98ad2e16-05ae-4094-93ad-d636fdbfecaf\") " pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.600033 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/98ad2e16-05ae-4094-93ad-d636fdbfecaf-host\") pod \"node-ca-vx8zv\" (UID: \"98ad2e16-05ae-4094-93ad-d636fdbfecaf\") " pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.603220 4760 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.603576 4760 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.604880 4760 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.604926 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.604942 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.604967 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.604983 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:51Z","lastTransitionTime":"2025-11-24T17:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.611935 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: E1124 17:03:51.624154 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.630457 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.630494 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.630509 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.630532 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.630548 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:51Z","lastTransitionTime":"2025-11-24T17:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.635648 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: E1124 17:03:51.652319 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.656578 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mo
untPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.657488 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.657551 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.657571 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.657599 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.657618 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:51Z","lastTransitionTime":"2025-11-24T17:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:51 crc kubenswrapper[4760]: E1124 17:03:51.680352 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.684607 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.688138 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.688210 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.688229 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.688257 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.688276 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:51Z","lastTransitionTime":"2025-11-24T17:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.701598 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/98ad2e16-05ae-4094-93ad-d636fdbfecaf-serviceca\") pod \"node-ca-vx8zv\" (UID: \"98ad2e16-05ae-4094-93ad-d636fdbfecaf\") " pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.701695 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/98ad2e16-05ae-4094-93ad-d636fdbfecaf-host\") pod \"node-ca-vx8zv\" (UID: \"98ad2e16-05ae-4094-93ad-d636fdbfecaf\") " pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.701765 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8pkk\" (UniqueName: \"kubernetes.io/projected/98ad2e16-05ae-4094-93ad-d636fdbfecaf-kube-api-access-h8pkk\") pod \"node-ca-vx8zv\" (UID: \"98ad2e16-05ae-4094-93ad-d636fdbfecaf\") " pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.701907 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/98ad2e16-05ae-4094-93ad-d636fdbfecaf-host\") pod \"node-ca-vx8zv\" (UID: \"98ad2e16-05ae-4094-93ad-d636fdbfecaf\") " pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: E1124 17:03:51.702213 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.703674 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/98ad2e16-05ae-4094-93ad-d636fdbfecaf-serviceca\") pod \"node-ca-vx8zv\" (UID: \"98ad2e16-05ae-4094-93ad-d636fdbfecaf\") " 
pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.708602 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.708666 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.708681 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.708706 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.708724 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:51Z","lastTransitionTime":"2025-11-24T17:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.717203 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z 
is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.723768 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6"} Nov 24 17:03:51 crc kubenswrapper[4760]: E1124 17:03:51.729417 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: E1124 17:03:51.729640 4760 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.733612 4760 generic.go:334] "Generic (PLEG): container finished" podID="15207e5d-cdbd-432f-bef7-cfb6992808f5" 
containerID="d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459" exitCode=0 Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.733843 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" event={"ID":"15207e5d-cdbd-432f-bef7-cfb6992808f5","Type":"ContainerDied","Data":"d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459"} Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.733992 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.734064 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.734082 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.734106 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.734127 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:51Z","lastTransitionTime":"2025-11-24T17:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.739254 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8pkk\" (UniqueName: \"kubernetes.io/projected/98ad2e16-05ae-4094-93ad-d636fdbfecaf-kube-api-access-h8pkk\") pod \"node-ca-vx8zv\" (UID: \"98ad2e16-05ae-4094-93ad-d636fdbfecaf\") " pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.739928 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.755617 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.779975 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.796171 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.809419 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.832559 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.839210 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.839257 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.839270 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.839289 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeNotReady" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.839303 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:51Z","lastTransitionTime":"2025-11-24T17:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.849067 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\
\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.855175 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-vx8zv" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.873729 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{
\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: W1124 17:03:51.875404 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98ad2e16_05ae_4094_93ad_d636fdbfecaf.slice/crio-8d0b1744fa23ae72f3297d933853b84cfdfa1d4f56a7136516a864b4de505ca4 WatchSource:0}: Error finding container 8d0b1744fa23ae72f3297d933853b84cfdfa1d4f56a7136516a864b4de505ca4: Status 404 returned error can't find the container with id 8d0b1744fa23ae72f3297d933853b84cfdfa1d4f56a7136516a864b4de505ca4 Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.898551 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.922582 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63
aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.943281 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z 
is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.944129 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.944167 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.945266 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.945342 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.945373 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:51Z","lastTransitionTime":"2025-11-24T17:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.960382 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:51 crc kubenswrapper[4760]: I1124 17:03:51.978710 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:51.999991 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.022569 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.037855 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.048458 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.048531 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.048551 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.048580 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.048606 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:52Z","lastTransitionTime":"2025-11-24T17:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.056341 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.071840 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.091221 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.109218 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.152614 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.152675 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.152695 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.152723 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.152743 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:52Z","lastTransitionTime":"2025-11-24T17:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.259829 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.260745 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.261098 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.261511 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.261958 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:52Z","lastTransitionTime":"2025-11-24T17:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.366609 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.366715 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.366736 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.366764 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.366782 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:52Z","lastTransitionTime":"2025-11-24T17:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.465835 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.465860 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:52 crc kubenswrapper[4760]: E1124 17:03:52.466095 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:03:52 crc kubenswrapper[4760]: E1124 17:03:52.466241 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.470851 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.470916 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.470941 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.470974 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.470998 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:52Z","lastTransitionTime":"2025-11-24T17:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.574888 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.574954 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.574972 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.575072 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.575094 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:52Z","lastTransitionTime":"2025-11-24T17:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.677646 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.677727 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.677751 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.677781 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.677797 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:52Z","lastTransitionTime":"2025-11-24T17:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.740867 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vx8zv" event={"ID":"98ad2e16-05ae-4094-93ad-d636fdbfecaf","Type":"ContainerStarted","Data":"fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.740948 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vx8zv" event={"ID":"98ad2e16-05ae-4094-93ad-d636fdbfecaf","Type":"ContainerStarted","Data":"8d0b1744fa23ae72f3297d933853b84cfdfa1d4f56a7136516a864b4de505ca4"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.747140 4760 generic.go:334] "Generic (PLEG): container finished" podID="15207e5d-cdbd-432f-bef7-cfb6992808f5" containerID="331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592" exitCode=0 Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.747211 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" event={"ID":"15207e5d-cdbd-432f-bef7-cfb6992808f5","Type":"ContainerDied","Data":"331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.766967 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.781213 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.781288 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.781314 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.781342 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.781361 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:52Z","lastTransitionTime":"2025-11-24T17:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.784079 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.809648 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.833101 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.852296 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.868533 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.885504 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.885564 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.885576 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.885608 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.885621 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:52Z","lastTransitionTime":"2025-11-24T17:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.888577 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.907798 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.924359 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.944480 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.961257 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.978796 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.988333 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.988362 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.988374 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.988392 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:52 crc kubenswrapper[4760]: I1124 17:03:52.988405 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:52Z","lastTransitionTime":"2025-11-24T17:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.004449 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.040385 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z 
is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.065384 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.087097 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.093894 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.094099 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.094223 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.094360 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.094475 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:53Z","lastTransitionTime":"2025-11-24T17:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.106578 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.125540 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.143379 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.161838 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.180599 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.198122 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.198165 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.198181 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.198202 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.198220 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:53Z","lastTransitionTime":"2025-11-24T17:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.206333 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.229183 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.252054 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.278319 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64
e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.301049 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.301107 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.301122 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.301142 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.301154 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:53Z","lastTransitionTime":"2025-11-24T17:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.319728 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z 
is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.340824 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.359225 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.404627 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.404685 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.404700 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.404727 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.404740 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:53Z","lastTransitionTime":"2025-11-24T17:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.466163 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:53 crc kubenswrapper[4760]: E1124 17:03:53.466634 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.508683 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.508747 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.508764 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.508794 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.508815 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:53Z","lastTransitionTime":"2025-11-24T17:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.613291 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.613337 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.613356 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.613379 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.613392 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:53Z","lastTransitionTime":"2025-11-24T17:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.716622 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.716705 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.716730 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.716766 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.716794 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:53Z","lastTransitionTime":"2025-11-24T17:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.756717 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" event={"ID":"15207e5d-cdbd-432f-bef7-cfb6992808f5","Type":"ContainerStarted","Data":"d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.765037 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"543cb828ed089e66fea1c80fe4cfb18ecc8007afcc7bcbb024fac47fb9bc00d0"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.765356 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.776124 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.820287 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.820330 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.820341 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.820361 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.820376 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:53Z","lastTransitionTime":"2025-11-24T17:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.842242 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.846542 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.861732 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.881173 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.901283 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.920656 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.923463 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.923536 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.923554 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.923580 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.923602 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:53Z","lastTransitionTime":"2025-11-24T17:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.946045 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:
03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.972536 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z 
is after 2025-08-24T17:21:41Z" Nov 24 17:03:53 crc kubenswrapper[4760]: I1124 17:03:53.992374 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.006865 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.027545 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.027615 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.027638 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.027669 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.027653 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.027689 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:54Z","lastTransitionTime":"2025-11-24T17:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.033504 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.033697 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.033738 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.033764 4760 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.033827 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:02.033805318 +0000 UTC m=+37.356686878 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.048483 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.065155 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.079847 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.101087 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.117414 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.130976 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.131047 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.131064 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.131079 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.131090 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:54Z","lastTransitionTime":"2025-11-24T17:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.134650 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.134821 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:04:02.13479586 +0000 UTC m=+37.457677410 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.134891 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.134958 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.135001 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.135093 4760 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.135144 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:02.13513623 +0000 UTC m=+37.458017780 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.135162 4760 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.135250 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:02.135227603 +0000 UTC m=+37.458109313 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.135244 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.135299 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.135319 4760 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.135448 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:02.135417798 +0000 UTC m=+37.458299378 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.138178 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.155693 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.175527 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.193387 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.208348 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.236781 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.236860 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.236880 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.236909 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.236935 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:54Z","lastTransitionTime":"2025-11-24T17:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.240030 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://543cb828ed089e66fea1c80fe4cfb18ecc8007afcc7bcbb024fac47fb9bc00d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkub
e-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.264102 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.290502 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.314746 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.340966 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.341057 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.341076 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.341108 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.341125 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:54Z","lastTransitionTime":"2025-11-24T17:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.342561 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:
03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.365437 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.384951 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.445379 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.445427 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.445441 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.445460 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.445473 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:54Z","lastTransitionTime":"2025-11-24T17:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.465832 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.465840 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.466114 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:03:54 crc kubenswrapper[4760]: E1124 17:03:54.466381 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.548819 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.548856 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.548869 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.548889 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.548900 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:54Z","lastTransitionTime":"2025-11-24T17:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.652544 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.652633 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.652657 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.652694 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.652733 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:54Z","lastTransitionTime":"2025-11-24T17:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.755981 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.756076 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.756107 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.756138 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.756162 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:54Z","lastTransitionTime":"2025-11-24T17:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.768200 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.768996 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.802384 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.821737 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.843681 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.859557 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.859619 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.859639 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.859666 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.859684 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:54Z","lastTransitionTime":"2025-11-24T17:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.863348 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.882500 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.902532 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.919757 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.933146 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.952577 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{
\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.963292 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.963483 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.963649 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.963770 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:54 crc kubenswrapper[4760]: I1124 17:03:54.963879 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:54Z","lastTransitionTime":"2025-11-24T17:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.015924 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.039607 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.066516 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.066592 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.066613 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.066643 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.066665 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:55Z","lastTransitionTime":"2025-11-24T17:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.078270 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://543cb828ed089e66fea1c80fe4cfb18ecc8007af
cc7bcbb024fac47fb9bc00d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.099667 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.121845 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.138570 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.170798 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.170851 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.170868 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.170887 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.170901 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:55Z","lastTransitionTime":"2025-11-24T17:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.208488 4760 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.273743 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.274086 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.274180 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.274264 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.274338 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:55Z","lastTransitionTime":"2025-11-24T17:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.377171 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.377620 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.377735 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.377988 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.378120 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:55Z","lastTransitionTime":"2025-11-24T17:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.466530 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:55 crc kubenswrapper[4760]: E1124 17:03:55.467024 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.480424 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.480691 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.480886 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.480991 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.481093 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:55Z","lastTransitionTime":"2025-11-24T17:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.488300 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.515278 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.528902 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.551411 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.572506 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.584089 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.584210 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.584269 4760 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.584354 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.584415 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:55Z","lastTransitionTime":"2025-11-24T17:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.589130 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.610948 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp
-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",
\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.640387 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://543cb828ed089e66fea1c80fe4cfb18ecc8007af
cc7bcbb024fac47fb9bc00d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.657815 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.673733 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.690981 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.691033 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.691043 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.691060 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.691071 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:55Z","lastTransitionTime":"2025-11-24T17:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.697923 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 
2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.710512 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.727869 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.741968 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.771169 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.794659 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.794712 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.794725 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.794745 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.794761 4760 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:55Z","lastTransitionTime":"2025-11-24T17:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.898108 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.898154 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.898165 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.898185 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:55 crc kubenswrapper[4760]: I1124 17:03:55.898196 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:55Z","lastTransitionTime":"2025-11-24T17:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.000583 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.000638 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.000650 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.000669 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.000988 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:56Z","lastTransitionTime":"2025-11-24T17:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.105254 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.105662 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.105919 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.106327 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.106670 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:56Z","lastTransitionTime":"2025-11-24T17:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.210580 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.210661 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.210686 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.210719 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.210740 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:56Z","lastTransitionTime":"2025-11-24T17:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.313302 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.313351 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.313373 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.313394 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.313405 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:56Z","lastTransitionTime":"2025-11-24T17:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.416754 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.416791 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.416800 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.416815 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.416825 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:56Z","lastTransitionTime":"2025-11-24T17:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.465515 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.465585 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:56 crc kubenswrapper[4760]: E1124 17:03:56.465662 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:03:56 crc kubenswrapper[4760]: E1124 17:03:56.465778 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.520483 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.520531 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.520539 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.520557 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.520567 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:56Z","lastTransitionTime":"2025-11-24T17:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.624767 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.624841 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.624860 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.624888 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.624910 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:56Z","lastTransitionTime":"2025-11-24T17:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.727306 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.727350 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.727360 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.727381 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.727394 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:56Z","lastTransitionTime":"2025-11-24T17:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.776842 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/0.log" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.781077 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="543cb828ed089e66fea1c80fe4cfb18ecc8007afcc7bcbb024fac47fb9bc00d0" exitCode=1 Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.781144 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"543cb828ed089e66fea1c80fe4cfb18ecc8007afcc7bcbb024fac47fb9bc00d0"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.781916 4760 scope.go:117] "RemoveContainer" containerID="543cb828ed089e66fea1c80fe4cfb18ecc8007afcc7bcbb024fac47fb9bc00d0" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.810322 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.830287 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.830351 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.830364 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.830391 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.830407 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:56Z","lastTransitionTime":"2025-11-24T17:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.844149 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:
03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.874491 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://543cb828ed089e66fea1c80fe4cfb18ecc8007af
cc7bcbb024fac47fb9bc00d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://543cb828ed089e66fea1c80fe4cfb18ecc8007afcc7bcbb024fac47fb9bc00d0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:56Z\\\",\\\"message\\\":\\\"394688 6095 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:03:56.394923 6095 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395065 6095 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395217 6095 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:03:56.395354 6095 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395678 6095 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.396171 6095 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:03:56.396653 6095 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 17:03:56.396715 6095 factory.go:656] Stopping watch factory\\\\nI1124 17:03:56.396739 6095 ovnkube.go:599] Stopped ovnkube\\\\nI1124 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.899166 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.918633 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.935138 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.935186 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.935206 4760 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.935227 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.935242 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:56Z","lastTransitionTime":"2025-11-24T17:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.942172 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.964503 4760 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:56 crc kubenswrapper[4760]: I1124 17:03:56.984680 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.000697 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:56Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.015286 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.030195 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.041462 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.041504 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.041516 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.041537 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.041552 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:57Z","lastTransitionTime":"2025-11-24T17:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.047334 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.063609 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.079904 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef
335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.145302 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.145378 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.145394 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.145421 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.145449 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:57Z","lastTransitionTime":"2025-11-24T17:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.249335 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.249418 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.249438 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.249467 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.249488 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:57Z","lastTransitionTime":"2025-11-24T17:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.352414 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.352478 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.352491 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.352516 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.352529 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:57Z","lastTransitionTime":"2025-11-24T17:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.455610 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.455705 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.455729 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.456231 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.456535 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:57Z","lastTransitionTime":"2025-11-24T17:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.465866 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:57 crc kubenswrapper[4760]: E1124 17:03:57.466061 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.559845 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.559890 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.559902 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.559920 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.559935 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:57Z","lastTransitionTime":"2025-11-24T17:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.662522 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.662574 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.662590 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.662612 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.662629 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:57Z","lastTransitionTime":"2025-11-24T17:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.765414 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.765499 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.765523 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.765555 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.765576 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:57Z","lastTransitionTime":"2025-11-24T17:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.790837 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/0.log" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.795858 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.796113 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.821730 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.845797 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.869086 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.869152 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.869169 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.869197 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.869216 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:57Z","lastTransitionTime":"2025-11-24T17:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.875831 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 
2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.901162 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.920237 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.939582 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.969680 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.972432 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.972535 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.972564 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.972605 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.972636 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:57Z","lastTransitionTime":"2025-11-24T17:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:57 crc kubenswrapper[4760]: I1124 17:03:57.993657 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:57Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.009808 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.028187 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.048307 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.066465 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.076237 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.076411 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.076476 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.076585 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.076654 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:58Z","lastTransitionTime":"2025-11-24T17:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.090781 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:
03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.121879 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd1
9a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://543cb828ed089e66fea1c80fe4cfb18ecc8007afcc7bcbb024fac47fb9bc00d0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:56Z\\\",\\\"message\\\":\\\"394688 6095 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:03:56.394923 6095 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395065 6095 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395217 6095 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:03:56.395354 6095 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395678 6095 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.396171 6095 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:03:56.396653 6095 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 17:03:56.396715 6095 factory.go:656] Stopping watch factory\\\\nI1124 17:03:56.396739 6095 ovnkube.go:599] Stopped ovnkube\\\\nI1124 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.179616 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.179675 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.179690 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.179710 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.179725 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:58Z","lastTransitionTime":"2025-11-24T17:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.283471 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.283541 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.283559 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.283590 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.283609 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:58Z","lastTransitionTime":"2025-11-24T17:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.387588 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.387648 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.387670 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.387707 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.387731 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:58Z","lastTransitionTime":"2025-11-24T17:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.466268 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:03:58 crc kubenswrapper[4760]: E1124 17:03:58.466529 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.466982 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:03:58 crc kubenswrapper[4760]: E1124 17:03:58.467247 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.491327 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.491416 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.491437 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.491949 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.492212 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:58Z","lastTransitionTime":"2025-11-24T17:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.595234 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.595295 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.595313 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.595341 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.595359 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:58Z","lastTransitionTime":"2025-11-24T17:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.699060 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.699160 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.699182 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.699206 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.699224 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:58Z","lastTransitionTime":"2025-11-24T17:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.802050 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.802097 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.802115 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.802139 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.802155 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:58Z","lastTransitionTime":"2025-11-24T17:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.802694 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/1.log" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.803695 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/0.log" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.809387 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470" exitCode=1 Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.809459 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470"} Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.809580 4760 scope.go:117] "RemoveContainer" containerID="543cb828ed089e66fea1c80fe4cfb18ecc8007afcc7bcbb024fac47fb9bc00d0" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.810861 4760 scope.go:117] "RemoveContainer" containerID="15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470" Nov 24 17:03:58 crc kubenswrapper[4760]: E1124 17:03:58.811355 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.839769 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.862978 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.884433 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.904223 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.906675 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz"] Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.907358 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.907520 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.907641 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.907762 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.907875 4760 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:58Z","lastTransitionTime":"2025-11-24T17:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.910943 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.914411 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.915633 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.933776 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.955128 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.975736 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.993197 4760 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/83bc066f-db82-440f-b301-ae9f092bbdb1-env-overrides\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.993271 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/83bc066f-db82-440f-b301-ae9f092bbdb1-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.993497 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mxpt\" (UniqueName: \"kubernetes.io/projected/83bc066f-db82-440f-b301-ae9f092bbdb1-kube-api-access-8mxpt\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:58 crc kubenswrapper[4760]: I1124 17:03:58.993574 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/83bc066f-db82-440f-b301-ae9f092bbdb1-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.002158 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:58Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.011692 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.011757 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.011775 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.011802 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.011819 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:59Z","lastTransitionTime":"2025-11-24T17:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.021182 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.043246 4760 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\
\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.066794 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\
\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: 
Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.095164 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mxpt\" (UniqueName: \"kubernetes.io/projected/83bc066f-db82-440f-b301-ae9f092bbdb1-kube-api-access-8mxpt\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.095281 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/83bc066f-db82-440f-b301-ae9f092bbdb1-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.095390 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/83bc066f-db82-440f-b301-ae9f092bbdb1-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.095424 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/83bc066f-db82-440f-b301-ae9f092bbdb1-env-overrides\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.096827 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/83bc066f-db82-440f-b301-ae9f092bbdb1-env-overrides\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.098041 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/83bc066f-db82-440f-b301-ae9f092bbdb1-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.098074 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://543cb828ed089e66fea1c80fe4cfb18ecc8007afcc7bcbb024fac47fb9bc00d0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:56Z\\\",\\\"message\\\":\\\"394688 6095 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:03:56.394923 6095 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395065 6095 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395217 6095 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:03:56.395354 6095 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395678 6095 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.396171 6095 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:03:56.396653 6095 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 17:03:56.396715 6095 factory.go:656] Stopping watch factory\\\\nI1124 17:03:56.396739 6095 ovnkube.go:599] Stopped ovnkube\\\\nI1124 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) 
from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.108065 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/83bc066f-db82-440f-b301-ae9f092bbdb1-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.114810 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.114853 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.114871 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.114895 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.114915 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:59Z","lastTransitionTime":"2025-11-24T17:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.121929 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.126779 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mxpt\" (UniqueName: \"kubernetes.io/projected/83bc066f-db82-440f-b301-ae9f092bbdb1-kube-api-access-8mxpt\") pod \"ovnkube-control-plane-749d76644c-gjlbz\" (UID: \"83bc066f-db82-440f-b301-ae9f092bbdb1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.142169 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"
podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.163564 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.180594 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.201143 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.217560 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.217663 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.217685 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.217714 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.217732 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:59Z","lastTransitionTime":"2025-11-24T17:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.225231 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.236074 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.245800 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.272614 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.294535 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.315479 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.320742 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.320761 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.320771 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.320786 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.320795 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:59Z","lastTransitionTime":"2025-11-24T17:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.331897 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.351560 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.376189 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s 
restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.396290 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.417828 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.424738 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.424806 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.424830 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.424861 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.424882 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:59Z","lastTransitionTime":"2025-11-24T17:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.438808 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:
03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.466309 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:03:59 crc kubenswrapper[4760]: E1124 17:03:59.466544 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.472036 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"
readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://543cb828ed089e66fea1c80fe4cfb18ecc8007afcc7bcbb024fac47fb9bc00d0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:56Z\\\",\\\"message\\\":\\\"394688 6095 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:03:56.394923 6095 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395065 6095 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395217 6095 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:03:56.395354 6095 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.395678 6095 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:56.396171 6095 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:03:56.396653 6095 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 17:03:56.396715 6095 factory.go:656] Stopping watch factory\\\\nI1124 17:03:56.396739 6095 ovnkube.go:599] Stopped ovnkube\\\\nI1124 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.528511 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.528563 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.528577 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.528599 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.528617 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:59Z","lastTransitionTime":"2025-11-24T17:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.631669 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.631716 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.631729 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.631749 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.631763 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:59Z","lastTransitionTime":"2025-11-24T17:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.734818 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.734863 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.734883 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.734900 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.734911 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:59Z","lastTransitionTime":"2025-11-24T17:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.817293 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/1.log" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.822599 4760 scope.go:117] "RemoveContainer" containerID="15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470" Nov 24 17:03:59 crc kubenswrapper[4760]: E1124 17:03:59.822859 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.824119 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" event={"ID":"83bc066f-db82-440f-b301-ae9f092bbdb1","Type":"ContainerStarted","Data":"a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.824164 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" event={"ID":"83bc066f-db82-440f-b301-ae9f092bbdb1","Type":"ContainerStarted","Data":"c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.824178 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" event={"ID":"83bc066f-db82-440f-b301-ae9f092bbdb1","Type":"ContainerStarted","Data":"c8b2b3370a6b144f0d77153bb51449536cd764b8030ee30b189d689d52fa8543"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.837831 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.837877 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.837898 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.837919 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.837931 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:59Z","lastTransitionTime":"2025-11-24T17:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.840975 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.863722 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.892521 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.915731 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.940759 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.940797 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.940806 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.940823 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.940833 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:03:59Z","lastTransitionTime":"2025-11-24T17:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.940879 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd1
9a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z"
Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.958657 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z"
Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.971050 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z"
Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.984505 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z"
Nov 24 17:03:59 crc kubenswrapper[4760]: I1124 17:03:59.999496 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:03:59Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.016652 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.033345 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.043796 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.043884 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.043904 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.043934 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.043953 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:00Z","lastTransitionTime":"2025-11-24T17:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.050491 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.069696 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.087362 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.107337 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.127050 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.146649 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.146802 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.146849 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.146862 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.146880 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.146892 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:00Z","lastTransitionTime":"2025-11-24T17:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.159111 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.178284 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.198274 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.215992 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.233275 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.250778 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.250826 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.250843 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.250871 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.250891 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:00Z","lastTransitionTime":"2025-11-24T17:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.267289 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.290583 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.317067 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.352325 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.354218 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.354270 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.354288 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.354314 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.354332 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:00Z","lastTransitionTime":"2025-11-24T17:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.377098 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.400478 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.419132 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.445904 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.457529 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.457598 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.457617 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.457643 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.457666 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:00Z","lastTransitionTime":"2025-11-24T17:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.465907 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.465917 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:00 crc kubenswrapper[4760]: E1124 17:04:00.466086 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:00 crc kubenswrapper[4760]: E1124 17:04:00.466214 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.560953 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.561048 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.561067 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.561096 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.561116 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:00Z","lastTransitionTime":"2025-11-24T17:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.664668 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.664793 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.664814 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.664844 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.664871 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:00Z","lastTransitionTime":"2025-11-24T17:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.767872 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.767923 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.767943 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.767968 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.767987 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:00Z","lastTransitionTime":"2025-11-24T17:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.831045 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-dz6vg"] Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.832731 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:00 crc kubenswrapper[4760]: E1124 17:04:00.832991 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.857866 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.871413 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.871624 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.871722 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.871828 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.871939 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:00Z","lastTransitionTime":"2025-11-24T17:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.882440 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.904228 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.918902 4760 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sw94\" (UniqueName: \"kubernetes.io/projected/e462626d-5645-4be7-89b4-383a4cde08f9-kube-api-access-6sw94\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.919798 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.921557 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-con
fig/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.942102 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.967826 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.975442 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.975517 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.975546 4760 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.975577 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.975601 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:00Z","lastTransitionTime":"2025-11-24T17:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:00 crc kubenswrapper[4760]: I1124 17:04:00.987195 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:00Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.007910 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp
-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",
\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.020935 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sw94\" (UniqueName: \"kubernetes.io/projected/e462626d-5645-4be7-89b4-383a4cde08f9-kube-api-access-6sw94\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.020987 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:01 crc kubenswrapper[4760]: E1124 17:04:01.021177 4760 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:01 crc kubenswrapper[4760]: E1124 17:04:01.021238 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs podName:e462626d-5645-4be7-89b4-383a4cde08f9 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:01.521220565 +0000 UTC m=+36.844102115 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs") pod "network-metrics-daemon-dz6vg" (UID: "e462626d-5645-4be7-89b4-383a4cde08f9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.033205 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnl
y\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"contai
nerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.049367 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.059187 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sw94\" (UniqueName: \"kubernetes.io/projected/e462626d-5645-4be7-89b4-383a4cde08f9-kube-api-access-6sw94\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.066618 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"
192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.079184 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.079248 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.079264 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.079289 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.079304 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:01Z","lastTransitionTime":"2025-11-24T17:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.083617 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.100926 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.120087 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.143505 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.160570 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:01Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.182693 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.183256 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.183377 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.183499 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.183601 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:01Z","lastTransitionTime":"2025-11-24T17:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.287679 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.288105 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.288283 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.288462 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.288617 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:01Z","lastTransitionTime":"2025-11-24T17:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.392256 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.392310 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.392322 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.392345 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.392360 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:01Z","lastTransitionTime":"2025-11-24T17:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.465922 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:01 crc kubenswrapper[4760]: E1124 17:04:01.466135 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.495216 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.495273 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.495284 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.495304 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.495316 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:01Z","lastTransitionTime":"2025-11-24T17:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.528633 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:01 crc kubenswrapper[4760]: E1124 17:04:01.528863 4760 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:01 crc kubenswrapper[4760]: E1124 17:04:01.529022 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs podName:e462626d-5645-4be7-89b4-383a4cde08f9 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:02.528986791 +0000 UTC m=+37.851868341 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs") pod "network-metrics-daemon-dz6vg" (UID: "e462626d-5645-4be7-89b4-383a4cde08f9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.598793 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.599490 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.599514 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.599546 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.599566 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:01Z","lastTransitionTime":"2025-11-24T17:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.702558 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.703036 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.703190 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.703351 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.703509 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:01Z","lastTransitionTime":"2025-11-24T17:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.807512 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.807583 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.807603 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.807633 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.807654 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:01Z","lastTransitionTime":"2025-11-24T17:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.911613 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.911687 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.911705 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.911731 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:01 crc kubenswrapper[4760]: I1124 17:04:01.911751 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:01Z","lastTransitionTime":"2025-11-24T17:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.015459 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.015522 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.015542 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.015569 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.015587 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.036486 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.036793 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.036850 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.036871 4760 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.036968 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:18.036940622 +0000 UTC m=+53.359822212 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.063527 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.063636 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.063664 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.063693 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.063711 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.086302 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.091915 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.091983 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.092031 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.092060 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.092121 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.113213 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.117877 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.117934 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.117951 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.117974 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.117993 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.137765 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.138076 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:04:18.137998938 +0000 UTC m=+53.460880568 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.138178 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.138297 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.138451 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.138502 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.138510 4760 configmap.go:193] Couldn't get configMap 
openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.138452 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.138653 4760 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.138529 4760 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.138714 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:18.138662676 +0000 UTC m=+53.461544406 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.138893 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:18.138870002 +0000 UTC m=+53.461751602 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.138930 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:18.138913553 +0000 UTC m=+53.461795413 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.139192 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node status patch payload elided; identical to the preceding \"Error updating node status, will retry\" entry ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.146209 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.146291 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.146312 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.146342 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.146362 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.169425 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.180301 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.180396 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.180426 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.180460 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.180509 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.206476 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.206698 4760 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.209552 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.209647 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.209675 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.209706 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.209732 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.312695 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.313188 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.313290 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.313434 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.313569 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.417224 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.417301 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.417322 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.417348 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.417366 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.466218 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.466301 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.466321 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.467088 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.467203 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.467301 4760 scope.go:117] "RemoveContainer" containerID="344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179" Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.466761 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.521145 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.521268 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.521290 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.521661 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.521690 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.544418 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.544767 4760 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:02 crc kubenswrapper[4760]: E1124 17:04:02.544948 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs podName:e462626d-5645-4be7-89b4-383a4cde08f9 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:04.544903794 +0000 UTC m=+39.867785394 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs") pod "network-metrics-daemon-dz6vg" (UID: "e462626d-5645-4be7-89b4-383a4cde08f9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.625355 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.625419 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.625440 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.625465 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.625485 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.728870 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.728934 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.728954 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.728983 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.729036 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.832136 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.832192 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.832210 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.832238 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.832260 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.842560 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.845768 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.846354 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.868877 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.886477 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.905462 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.923747 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.935484 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.935559 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.935579 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.935610 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.935631 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:02Z","lastTransitionTime":"2025-11-24T17:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.946865 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.967828 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:02 crc kubenswrapper[4760]: I1124 17:04:02.986580 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:02Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.009350 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.030985 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.038758 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.038800 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.038819 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.038847 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.038873 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:03Z","lastTransitionTime":"2025-11-24T17:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.054068 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.077709 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:
58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.101469 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.128632 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\"
,\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.142492 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.142548 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.142571 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.142603 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.142624 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:03Z","lastTransitionTime":"2025-11-24T17:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.156840 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.192806 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.216682 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:03Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.247161 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.247229 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.247255 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.247290 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.247313 4760 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:03Z","lastTransitionTime":"2025-11-24T17:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.351756 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.351807 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.351824 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.351849 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.351867 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:03Z","lastTransitionTime":"2025-11-24T17:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.456285 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.456385 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.456406 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.456431 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.456451 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:03Z","lastTransitionTime":"2025-11-24T17:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.465847 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:03 crc kubenswrapper[4760]: E1124 17:04:03.466097 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.560515 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.560588 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.560605 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.560634 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.560661 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:03Z","lastTransitionTime":"2025-11-24T17:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.664436 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.664477 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.664489 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.664508 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.664520 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:03Z","lastTransitionTime":"2025-11-24T17:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.767777 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.767853 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.767879 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.767918 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.767944 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:03Z","lastTransitionTime":"2025-11-24T17:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.870546 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.870610 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.870631 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.870657 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.870675 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:03Z","lastTransitionTime":"2025-11-24T17:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.973676 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.973758 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.973783 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.973819 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:03 crc kubenswrapper[4760]: I1124 17:04:03.973923 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:03Z","lastTransitionTime":"2025-11-24T17:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.077242 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.077316 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.077338 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.077368 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.077389 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:04Z","lastTransitionTime":"2025-11-24T17:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.180575 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.180648 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.180668 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.180694 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.180712 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:04Z","lastTransitionTime":"2025-11-24T17:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.283253 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.283305 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.283316 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.283334 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.283346 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:04Z","lastTransitionTime":"2025-11-24T17:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.386767 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.386811 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.386825 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.386843 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.386855 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:04Z","lastTransitionTime":"2025-11-24T17:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.466419 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.466545 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.466434 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:04 crc kubenswrapper[4760]: E1124 17:04:04.466642 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:04 crc kubenswrapper[4760]: E1124 17:04:04.466784 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:04 crc kubenswrapper[4760]: E1124 17:04:04.467103 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
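Meanwhile every pod status patch in this window dies with the same TLS failure: the network-node-identity webhook at 127.0.0.1:9743 is serving a certificate whose NotAfter (2025-08-24T17:21:41Z) is months before the current time, so verification fails before any HTTP exchange happens. The validity-window check itself is simple; a self-contained illustration of what crypto/x509 enforces during the handshake (the file path is a placeholder, not taken from the log):

```go
// certwindow.go - checks whether a PEM certificate's NotBefore/NotAfter
// window contains the current time, the exact condition behind
// "x509: certificate has expired or is not yet valid" above.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("server.crt") // placeholder path
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Println("no PEM block found")
		return
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	now := time.Now().UTC()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("invalid: current time %s is outside [%s, %s]\n",
			now.Format(time.RFC3339),
			cert.NotBefore.UTC().Format(time.RFC3339),
			cert.NotAfter.UTC().Format(time.RFC3339))
		return
	}
	fmt.Println("certificate is within its validity window")
}
```

Note the failure is client-side and deterministic: until the webhook's serving certificate is rotated, every patch attempt will hit the same error no matter how often it retries.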
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.492505 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.492601 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.492613 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.492636 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.492651 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:04Z","lastTransitionTime":"2025-11-24T17:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.583373 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:04 crc kubenswrapper[4760]: E1124 17:04:04.583661 4760 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:04 crc kubenswrapper[4760]: E1124 17:04:04.583847 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs podName:e462626d-5645-4be7-89b4-383a4cde08f9 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:08.583815252 +0000 UTC m=+43.906696842 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs") pod "network-metrics-daemon-dz6vg" (UID: "e462626d-5645-4be7-89b4-383a4cde08f9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.596076 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.596140 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.596154 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.596179 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.596196 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:04Z","lastTransitionTime":"2025-11-24T17:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.699104 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.699193 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.699206 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.699224 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.699240 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:04Z","lastTransitionTime":"2025-11-24T17:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.802317 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.802411 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.802455 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.802491 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.802520 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:04Z","lastTransitionTime":"2025-11-24T17:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.906341 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.906397 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.906416 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.906439 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:04 crc kubenswrapper[4760]: I1124 17:04:04.906457 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:04Z","lastTransitionTime":"2025-11-24T17:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.009482 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.009549 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.009569 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.009593 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.009611 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:05Z","lastTransitionTime":"2025-11-24T17:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.113039 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.113109 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.113128 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.113158 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.113178 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:05Z","lastTransitionTime":"2025-11-24T17:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.217102 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.217174 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.217203 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.217242 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.217268 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:05Z","lastTransitionTime":"2025-11-24T17:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.320520 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.320598 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.320618 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.320644 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.320664 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:05Z","lastTransitionTime":"2025-11-24T17:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.424932 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.425034 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.425063 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.425094 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.425113 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:05Z","lastTransitionTime":"2025-11-24T17:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.466468 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:05 crc kubenswrapper[4760]: E1124 17:04:05.466694 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
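The failed secret mount a few records above ("No retries permitted until ... durationBeforeRetry 4s") shows the other recovery mechanism at work: per-operation exponential backoff, the same shape as the "back-off 10s restarting failed container" message for ovnkube-controller earlier. Each consecutive failure roughly doubles the wait before the next attempt, up to a cap. A toy version of that bookkeeping, with the initial delay and cap as illustrative values rather than kubelet's actual constants:

```go
// backoff.go - illustrative per-operation exponential backoff tracker.
package main

import (
	"fmt"
	"time"
)

type backoff struct {
	lastFailure time.Time
	duration    time.Duration
}

// fail records a failure and doubles the retry delay (capped).
func (b *backoff) fail(now time.Time) {
	if b.duration == 0 {
		b.duration = 500 * time.Millisecond // assumed initial delay
	} else {
		b.duration *= 2
	}
	if maxDelay := 2 * time.Minute; b.duration > maxDelay {
		b.duration = maxDelay // assumed cap
	}
	b.lastFailure = now
}

// retryAllowedAt reports when the next attempt may run.
func (b *backoff) retryAllowedAt() time.Time {
	return b.lastFailure.Add(b.duration)
}

func main() {
	var b backoff
	now := time.Now()
	for attempt := 1; attempt <= 4; attempt++ {
		b.fail(now)
		fmt.Printf("attempt %d failed; no retries permitted until %s (durationBeforeRetry %s)\n",
			attempt, b.retryAllowedAt().Format(time.RFC3339), b.duration)
	}
}
```

Backing off this way keeps a persistently failing mount (here, a secret that is "not registered" yet) from hammering the API server, while still converging quickly once the missing object appears.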
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.489425 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.509519 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.528367 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.528451 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.528477 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.528508 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.528530 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:05Z","lastTransitionTime":"2025-11-24T17:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.528554 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.546184 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.571759 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.592771 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.611164 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.626619 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0
b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.631835 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.631890 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.631909 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.631934 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.631951 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:05Z","lastTransitionTime":"2025-11-24T17:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.647997 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.669250 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.694212 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.717770 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.736388 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.736443 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.736460 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.736484 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.736503 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:05Z","lastTransitionTime":"2025-11-24T17:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.745445 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd1
9a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.766446 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.782913 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.800858 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.840668 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.840799 
4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.840829 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.840861 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.840881 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:05Z","lastTransitionTime":"2025-11-24T17:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.944935 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.945000 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.945047 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.945073 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:05 crc kubenswrapper[4760]: I1124 17:04:05.945096 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:05Z","lastTransitionTime":"2025-11-24T17:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.052830 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.052924 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.052944 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.053124 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.053156 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:06Z","lastTransitionTime":"2025-11-24T17:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.157366 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.157469 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.157494 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.157529 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.157555 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:06Z","lastTransitionTime":"2025-11-24T17:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.261605 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.261683 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.261703 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.261734 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.261758 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:06Z","lastTransitionTime":"2025-11-24T17:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.365762 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.365837 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.365862 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.365892 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.365911 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:06Z","lastTransitionTime":"2025-11-24T17:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.465733 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.465787 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.465940 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:06 crc kubenswrapper[4760]: E1124 17:04:06.465939 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:06 crc kubenswrapper[4760]: E1124 17:04:06.466138 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:06 crc kubenswrapper[4760]: E1124 17:04:06.466304 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.469355 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.469425 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.469450 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.469479 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.469502 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:06Z","lastTransitionTime":"2025-11-24T17:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.572788 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.572849 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.572867 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.572895 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.572915 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:06Z","lastTransitionTime":"2025-11-24T17:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.676894 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.676961 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.676978 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.677033 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.677053 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:06Z","lastTransitionTime":"2025-11-24T17:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.779621 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.779686 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.779707 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.779733 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.779754 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:06Z","lastTransitionTime":"2025-11-24T17:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.882163 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.882238 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.882260 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.882329 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.882355 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:06Z","lastTransitionTime":"2025-11-24T17:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.985316 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.985381 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.985399 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.985425 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:06 crc kubenswrapper[4760]: I1124 17:04:06.985441 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:06Z","lastTransitionTime":"2025-11-24T17:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.088321 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.088402 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.088422 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.088448 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.088468 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:07Z","lastTransitionTime":"2025-11-24T17:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.191625 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.191684 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.191706 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.191736 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.191758 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:07Z","lastTransitionTime":"2025-11-24T17:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.294766 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.294825 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.294843 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.294870 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.294890 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:07Z","lastTransitionTime":"2025-11-24T17:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.374414 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.375883 4760 scope.go:117] "RemoveContainer" containerID="15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470" Nov 24 17:04:07 crc kubenswrapper[4760]: E1124 17:04:07.376406 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.397867 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.397917 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.397940 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.397966 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.397991 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:07Z","lastTransitionTime":"2025-11-24T17:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.466401 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:07 crc kubenswrapper[4760]: E1124 17:04:07.466615 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.502124 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.502259 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.502285 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.502334 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.502361 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:07Z","lastTransitionTime":"2025-11-24T17:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.605360 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.605421 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.605439 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.605466 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.605488 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:07Z","lastTransitionTime":"2025-11-24T17:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.708892 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.709275 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.709427 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.709630 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.709770 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:07Z","lastTransitionTime":"2025-11-24T17:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.813669 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.813738 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.813786 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.813816 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.813834 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:07Z","lastTransitionTime":"2025-11-24T17:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.917692 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.917759 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.917775 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.917804 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:07 crc kubenswrapper[4760]: I1124 17:04:07.917824 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:07Z","lastTransitionTime":"2025-11-24T17:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.021622 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.021702 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.021729 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.021758 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.021777 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:08Z","lastTransitionTime":"2025-11-24T17:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.125966 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.126084 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.126103 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.126126 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.126145 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:08Z","lastTransitionTime":"2025-11-24T17:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.233050 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.233139 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.233166 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.233199 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.233224 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:08Z","lastTransitionTime":"2025-11-24T17:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.336909 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.336983 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.337036 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.337070 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.337094 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:08Z","lastTransitionTime":"2025-11-24T17:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.440089 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.440162 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.440181 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.440206 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.440225 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:08Z","lastTransitionTime":"2025-11-24T17:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.465905 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.465983 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:08 crc kubenswrapper[4760]: E1124 17:04:08.466234 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:08 crc kubenswrapper[4760]: E1124 17:04:08.466352 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.465946 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:08 crc kubenswrapper[4760]: E1124 17:04:08.466873 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.544204 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.544682 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.544843 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.544986 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.545151 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:08Z","lastTransitionTime":"2025-11-24T17:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.635487 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:08 crc kubenswrapper[4760]: E1124 17:04:08.635572 4760 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:08 crc kubenswrapper[4760]: E1124 17:04:08.636089 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs podName:e462626d-5645-4be7-89b4-383a4cde08f9 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:16.636057759 +0000 UTC m=+51.958939339 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs") pod "network-metrics-daemon-dz6vg" (UID: "e462626d-5645-4be7-89b4-383a4cde08f9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.649950 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.650086 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.650114 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.650148 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.650174 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:08Z","lastTransitionTime":"2025-11-24T17:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.753291 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.753745 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.753921 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.754132 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.754320 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:08Z","lastTransitionTime":"2025-11-24T17:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.857917 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.857981 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.857999 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.858055 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.858077 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:08Z","lastTransitionTime":"2025-11-24T17:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.961599 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.962109 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.962286 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.962428 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:08 crc kubenswrapper[4760]: I1124 17:04:08.962581 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:08Z","lastTransitionTime":"2025-11-24T17:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.066449 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.067049 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.067288 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.067451 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.067620 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:09Z","lastTransitionTime":"2025-11-24T17:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.171721 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.171815 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.171842 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.171874 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.171898 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:09Z","lastTransitionTime":"2025-11-24T17:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.274876 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.274966 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.274983 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.275038 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.275057 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:09Z","lastTransitionTime":"2025-11-24T17:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.378571 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.378707 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.378727 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.378753 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.378771 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:09Z","lastTransitionTime":"2025-11-24T17:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.465541 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:09 crc kubenswrapper[4760]: E1124 17:04:09.465792 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.482269 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.482368 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.482389 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.482420 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.482445 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:09Z","lastTransitionTime":"2025-11-24T17:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.585342 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.585409 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.585430 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.585456 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.585475 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:09Z","lastTransitionTime":"2025-11-24T17:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.688566 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.688629 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.688647 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.688673 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.688691 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:09Z","lastTransitionTime":"2025-11-24T17:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.792214 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.792267 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.792287 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.792311 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.792329 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:09Z","lastTransitionTime":"2025-11-24T17:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.894963 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.895083 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.895105 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.895172 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.895191 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:09Z","lastTransitionTime":"2025-11-24T17:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.997326 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.997659 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.997743 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.997841 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:09 crc kubenswrapper[4760]: I1124 17:04:09.997928 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:09Z","lastTransitionTime":"2025-11-24T17:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.101853 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.101927 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.101951 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.101982 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.102044 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:10Z","lastTransitionTime":"2025-11-24T17:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.205696 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.205754 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.205778 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.205805 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.205829 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:10Z","lastTransitionTime":"2025-11-24T17:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.308998 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.309071 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.309086 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.309106 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.309120 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:10Z","lastTransitionTime":"2025-11-24T17:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.411713 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.411787 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.411797 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.411822 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.411835 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:10Z","lastTransitionTime":"2025-11-24T17:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.465997 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.466176 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:10 crc kubenswrapper[4760]: E1124 17:04:10.466290 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:10 crc kubenswrapper[4760]: E1124 17:04:10.466368 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.466447 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:10 crc kubenswrapper[4760]: E1124 17:04:10.466511 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.519551 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.519660 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.519682 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.519710 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.519732 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:10Z","lastTransitionTime":"2025-11-24T17:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.623165 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.623218 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.623237 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.623260 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.623278 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:10Z","lastTransitionTime":"2025-11-24T17:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.726710 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.727124 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.727258 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.727481 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.727632 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:10Z","lastTransitionTime":"2025-11-24T17:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.831667 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.831729 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.831744 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.831765 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.831782 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:10Z","lastTransitionTime":"2025-11-24T17:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.935675 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.935743 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.935760 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.935783 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:10 crc kubenswrapper[4760]: I1124 17:04:10.935799 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:10Z","lastTransitionTime":"2025-11-24T17:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.038938 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.039032 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.039055 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.039081 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.039101 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:11Z","lastTransitionTime":"2025-11-24T17:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.141574 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.141605 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.141613 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.141626 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.141636 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:11Z","lastTransitionTime":"2025-11-24T17:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.246223 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.246314 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.246339 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.246368 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.246393 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:11Z","lastTransitionTime":"2025-11-24T17:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.349992 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.350077 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.350096 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.350123 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.350142 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:11Z","lastTransitionTime":"2025-11-24T17:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.453858 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.453960 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.453980 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.454045 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.454069 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:11Z","lastTransitionTime":"2025-11-24T17:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.466355 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:11 crc kubenswrapper[4760]: E1124 17:04:11.466554 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.558124 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.558178 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.558193 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.558213 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.558227 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:11Z","lastTransitionTime":"2025-11-24T17:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.662909 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.662980 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.663033 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.663060 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.663082 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:11Z","lastTransitionTime":"2025-11-24T17:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.766297 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.766362 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.766380 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.766409 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.766432 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:11Z","lastTransitionTime":"2025-11-24T17:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.870489 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.870559 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.870579 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.870609 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.870630 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:11Z","lastTransitionTime":"2025-11-24T17:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.974895 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.974979 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.975037 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.975065 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:11 crc kubenswrapper[4760]: I1124 17:04:11.975084 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:11Z","lastTransitionTime":"2025-11-24T17:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.079211 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.079296 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.079334 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.079363 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.079383 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.182512 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.182563 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.182579 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.182605 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.182624 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.285891 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.285952 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.285965 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.285986 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.286000 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.389560 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.389631 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.389650 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.389674 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.389694 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.465773 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.465797 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:12 crc kubenswrapper[4760]: E1124 17:04:12.466044 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.466085 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:12 crc kubenswrapper[4760]: E1124 17:04:12.466414 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:12 crc kubenswrapper[4760]: E1124 17:04:12.466308 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.494362 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.494438 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.494461 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.494489 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.494510 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.598282 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.598388 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.598407 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.598445 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.598466 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.605310 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.605369 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.605390 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.605415 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.605434 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: E1124 17:04:12.627808 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:12Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.634074 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.634190 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.634210 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.634231 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.634250 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: E1124 17:04:12.654306 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:12Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.660405 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.660464 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.660488 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.660521 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.660543 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: E1124 17:04:12.688257 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:12Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.693637 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.693711 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.693730 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.693785 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.693804 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: E1124 17:04:12.714676 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:12Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.719888 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.719967 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.719984 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.720019 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.720034 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: E1124 17:04:12.740737 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:12Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:12 crc kubenswrapper[4760]: E1124 17:04:12.740975 4760 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.743356 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.743408 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.743428 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.743457 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.743478 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.847405 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.847463 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.847481 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.847553 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.847580 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.950718 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.950781 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.950791 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.950811 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:12 crc kubenswrapper[4760]: I1124 17:04:12.950822 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:12Z","lastTransitionTime":"2025-11-24T17:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.056362 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.056430 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.056447 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.056475 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.056495 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:13Z","lastTransitionTime":"2025-11-24T17:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.160523 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.160606 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.160624 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.160650 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.160672 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:13Z","lastTransitionTime":"2025-11-24T17:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.265735 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.265791 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.265809 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.265834 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.265851 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:13Z","lastTransitionTime":"2025-11-24T17:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.369173 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.369265 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.369282 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.369306 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.369324 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:13Z","lastTransitionTime":"2025-11-24T17:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.466512 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:13 crc kubenswrapper[4760]: E1124 17:04:13.466710 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.473081 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.473138 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.473150 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.473171 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.473184 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:13Z","lastTransitionTime":"2025-11-24T17:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.576498 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.576557 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.576574 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.576599 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.576630 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:13Z","lastTransitionTime":"2025-11-24T17:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.680662 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.680728 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.680746 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.680770 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.680786 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:13Z","lastTransitionTime":"2025-11-24T17:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.783736 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.783802 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.783819 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.783846 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.783867 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:13Z","lastTransitionTime":"2025-11-24T17:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.887145 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.887238 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.887259 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.887321 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.887342 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:13Z","lastTransitionTime":"2025-11-24T17:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.990834 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.990902 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.990920 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.990945 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:13 crc kubenswrapper[4760]: I1124 17:04:13.990962 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:13Z","lastTransitionTime":"2025-11-24T17:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.094836 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.094951 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.094976 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.095028 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.095048 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:14Z","lastTransitionTime":"2025-11-24T17:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.198789 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.198853 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.198873 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.198901 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.198919 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:14Z","lastTransitionTime":"2025-11-24T17:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.302081 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.302134 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.302148 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.302169 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.302182 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:14Z","lastTransitionTime":"2025-11-24T17:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.406118 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.406190 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.406208 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.406237 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.406256 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:14Z","lastTransitionTime":"2025-11-24T17:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.466087 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.466144 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:14 crc kubenswrapper[4760]: E1124 17:04:14.466299 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.466341 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:14 crc kubenswrapper[4760]: E1124 17:04:14.466508 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:14 crc kubenswrapper[4760]: E1124 17:04:14.466715 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.508924 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.508982 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.508994 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.509029 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.509043 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:14Z","lastTransitionTime":"2025-11-24T17:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.612421 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.612538 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.612559 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.612588 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.612611 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:14Z","lastTransitionTime":"2025-11-24T17:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.715511 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.715652 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.715672 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.715699 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.715718 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:14Z","lastTransitionTime":"2025-11-24T17:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.819311 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.819395 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.819423 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.819450 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.819468 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:14Z","lastTransitionTime":"2025-11-24T17:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.922930 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.922997 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.923056 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.923087 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:14 crc kubenswrapper[4760]: I1124 17:04:14.923108 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:14Z","lastTransitionTime":"2025-11-24T17:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.026298 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.026348 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.026366 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.026391 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.026410 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:15Z","lastTransitionTime":"2025-11-24T17:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.130479 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.130540 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.130557 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.130583 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.130601 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:15Z","lastTransitionTime":"2025-11-24T17:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.233416 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.233477 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.233498 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.233522 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.233541 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:15Z","lastTransitionTime":"2025-11-24T17:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.271580 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.284760 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.298449 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.319881 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.336825 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.336886 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.336904 4760 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.336930 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.336948 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:15Z","lastTransitionTime":"2025-11-24T17:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.340640 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.367331 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp
-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",
\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.402206 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd1
9a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.423852 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.439786 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.439835 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.439850 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.439870 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.439885 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:15Z","lastTransitionTime":"2025-11-24T17:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.443554 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.458903 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.466243 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:15 crc kubenswrapper[4760]: E1124 17:04:15.466528 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.480450 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.497993 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.516941 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.533361 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.543533 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.543592 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.543612 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.543640 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.543660 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:15Z","lastTransitionTime":"2025-11-24T17:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.556113 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.579770 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.598392 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.616896 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0
b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.638143 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.647465 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.647522 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.647539 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.647565 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.647586 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:15Z","lastTransitionTime":"2025-11-24T17:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.658924 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.676177 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.697503 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.720716 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.751150 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.751551 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.751583 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.751599 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.751628 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.751651 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:15Z","lastTransitionTime":"2025-11-24T17:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.771740 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.783976 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.798419 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.811905 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.826727 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.833904 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.841995 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.854492 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.854548 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.854567 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.854598 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.854615 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:15Z","lastTransitionTime":"2025-11-24T17:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.861977 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.879804 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.903188 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.925492 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.959281 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.959155 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fc
c21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.959393 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.959656 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.959710 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.959734 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:15Z","lastTransitionTime":"2025-11-24T17:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:15 crc kubenswrapper[4760]: I1124 17:04:15.979874 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.001668 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:15Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.021829 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.043499 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0
b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.062978 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.063102 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.063123 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.063154 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.063177 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:16Z","lastTransitionTime":"2025-11-24T17:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.069118 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.090631 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.112384 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.136259 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.166056 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.166148 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.166174 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.166211 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.166235 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:16Z","lastTransitionTime":"2025-11-24T17:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.169235 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd1
9a2daecebd2c266e6ff97470\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.190717 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.208658 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.226345 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.246164 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.270112 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.270158 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.270170 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.270189 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.270203 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:16Z","lastTransitionTime":"2025-11-24T17:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.271580 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.293223 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.310883 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.325874 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:16Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.372647 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.372734 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.372753 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.372785 4760 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.372804 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:16Z","lastTransitionTime":"2025-11-24T17:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.465851 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.465910 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.465849 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:04:16 crc kubenswrapper[4760]: E1124 17:04:16.466084 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:04:16 crc kubenswrapper[4760]: E1124 17:04:16.466379 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9"
Nov 24 17:04:16 crc kubenswrapper[4760]: E1124 17:04:16.466542 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
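The NetworkReady=false / "no CNI configuration file" records above keep repeating until a network provider writes a configuration into /etc/kubernetes/cni/net.d/. A minimal sketch of that kind of readiness probe in Go, assuming the usual convention that any *.conf, *.conflist, or *.json file in the conf dir counts as a network configuration (the real kubelet/CRI-O check is more involved):

    // cnicheck.go - sketch: report whether a CNI conf dir holds any network
    // configuration, mirroring the "no CNI configuration file" error above.
    // Extensions checked here are the conventional ones; this is not the
    // exact kubelet/CRI-O logic.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func hasCNIConfig(dir string) (bool, error) {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return false, err
        }
        for _, e := range entries {
            if e.IsDir() {
                continue
            }
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                return true, nil // at least one network config present
            }
        }
        return false, nil
    }

    func main() {
        ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
        if err != nil {
            fmt.Println("cannot read CNI conf dir:", err)
            return
        }
        if !ok {
            fmt.Println("NetworkReady=false: no CNI configuration file found")
            return
        }
        fmt.Println("CNI configuration present")
    }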
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.476212 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.476271 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.476286 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.476307 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.476329 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:16Z","lastTransitionTime":"2025-11-24T17:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.580429 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.580492 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.580511 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.580537 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.580557 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:16Z","lastTransitionTime":"2025-11-24T17:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.671122 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:16 crc kubenswrapper[4760]: E1124 17:04:16.671415 4760 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 17:04:16 crc kubenswrapper[4760]: E1124 17:04:16.671554 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs podName:e462626d-5645-4be7-89b4-383a4cde08f9 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:32.671521731 +0000 UTC m=+67.994403321 (durationBeforeRetry 16s). 
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.683751 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.683828 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.683851 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.683878 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.683900 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:16Z","lastTransitionTime":"2025-11-24T17:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.787624 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.787694 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.787717 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.787751 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.787776 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:16Z","lastTransitionTime":"2025-11-24T17:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
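Every "Failed to update status for pod" record in this section carries the same root cause: the serving certificate behind the pod.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, months before the node's clock (2025-11-24T17:04:16Z), so every status patch is rejected at the TLS layer. A self-contained sketch of the NotBefore/NotAfter window check that produces the "certificate has expired or is not yet valid" error, using a hypothetical certificate path:

    // certwindow.go - sketch: reproduce the crypto/x509 validity-window
    // check for a PEM certificate on disk. The path below is a placeholder,
    // not taken from this log.
    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        data, err := os.ReadFile("/tmp/serving-cert.pem") // hypothetical path
        if err != nil {
            fmt.Println("read:", err)
            return
        }
        block, _ := pem.Decode(data)
        if block == nil {
            fmt.Println("no PEM block found")
            return
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            fmt.Println("parse:", err)
            return
        }
        now := time.Now()
        switch {
        case now.Before(cert.NotBefore):
            fmt.Printf("not yet valid: current time %v is before %v\n", now.UTC(), cert.NotBefore.UTC())
        case now.After(cert.NotAfter):
            fmt.Printf("expired: current time %v is after %v\n", now.UTC(), cert.NotAfter.UTC())
        default:
            fmt.Println("certificate is within its validity window")
        }
    }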
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.890900 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.890982 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.891042 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.891076 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.891101 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:16Z","lastTransitionTime":"2025-11-24T17:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.993919 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.994053 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.994085 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.994123 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:16 crc kubenswrapper[4760]: I1124 17:04:16.994148 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:16Z","lastTransitionTime":"2025-11-24T17:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.098150 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.098222 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.098247 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.098278 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.098301 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:17Z","lastTransitionTime":"2025-11-24T17:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.201378 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.201455 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.201474 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.201500 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.201517 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:17Z","lastTransitionTime":"2025-11-24T17:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.305240 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.305330 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.305351 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.305374 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.305394 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:17Z","lastTransitionTime":"2025-11-24T17:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.408974 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.409077 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.409101 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.409133 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.409158 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:17Z","lastTransitionTime":"2025-11-24T17:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.466172 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:17 crc kubenswrapper[4760]: E1124 17:04:17.466432 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.513218 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.513288 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.513308 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.513370 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.513393 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:17Z","lastTransitionTime":"2025-11-24T17:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.617403 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.617471 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.617488 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.617524 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.617547 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:17Z","lastTransitionTime":"2025-11-24T17:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.721084 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.721190 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.721219 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.721252 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.721271 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:17Z","lastTransitionTime":"2025-11-24T17:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.825626 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.825696 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.825717 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.825741 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.825760 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:17Z","lastTransitionTime":"2025-11-24T17:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.960174 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.960565 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.960585 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.960612 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:17 crc kubenswrapper[4760]: I1124 17:04:17.960632 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:17Z","lastTransitionTime":"2025-11-24T17:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.064274 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.064363 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.064391 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.064426 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.064445 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.087845 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.088177 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.088242 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.088258 4760 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
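Editor's note: the projected.go errors above come from the kubelet's internal object cache, not necessarily from missing API objects; after a kubelet restart, a pod's ConfigMaps and Secrets read as "not registered" until the pod manager re-registers them. A quick way to separate the two cases is to confirm the objects actually exist in the API. A minimal client-go sketch (diagnostic only, not part of the kubelet; assumes $HOME/.kube/config grants read access):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the default kubeconfig (~/.kube/config); in-cluster config would also work.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	// The two objects the projected volume needs, taken straight from the log.
	for _, name := range []string{"kube-root-ca.crt", "openshift-service-ca.crt"} {
		_, err := cs.CoreV1().ConfigMaps("openshift-network-diagnostics").Get(context.TODO(), name, metav1.GetOptions{})
		fmt.Printf("configmap %s: err=%v\n", name, err) // err == nil: object exists, kubelet cache is just lagging
	}
}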
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.088353 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:50.08832736 +0000 UTC m=+85.411208920 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.167853 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.167906 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.167924 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.167949 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.167969 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.188578 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.188703 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.188759 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
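Editor's note: the "No retries permitted until ... (durationBeforeRetry 32s)" entries reflect the volume manager's per-volume backoff: each failed mount roughly doubles the wait before the next attempt, up to a cap, which is why retries in this window are 32 s apart. A minimal sketch of that doubling-with-cap pattern; the initial delay and cap are illustrative assumptions, not the kubelet's exact constants:

package main

import (
	"fmt"
	"time"
)

// nextDelay doubles the previous retry delay up to a fixed limit, mirroring the
// durationBeforeRetry growth visible in the log (..., 16s, 32s, ...).
func nextDelay(current, limit time.Duration) time.Duration {
	if current == 0 {
		return 500 * time.Millisecond // assumed initial delay
	}
	if doubled := 2 * current; doubled < limit {
		return doubled
	}
	return limit
}

func main() {
	var d time.Duration
	for i := 0; i < 9; i++ {
		d = nextDelay(d, 2*time.Minute) // assumed cap
		fmt.Println(d)                  // 500ms 1s 2s 4s 8s 16s 32s 1m4s 2m0s
	}
}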
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.188803 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:04:50.188770788 +0000 UTC m=+85.511652368 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.188893 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.188920 4760 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.188982 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.189041 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:50.188982454 +0000 UTC m=+85.511864054 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.189050 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.189074 4760 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.189087 4760 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
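Editor's note: the TearDown failure above means no CSI plugin named kubevirt.io.hostpath-provisioner had registered with this kubelet yet. Drivers register through the kubelet plugin mechanism and are reflected in the node's CSINode object, so listing it shows which drivers are registered. A client-go sketch for node "crc" (the node name from the log; kubeconfig read access assumed):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	// CSINode mirrors the kubelet's CSI plugin registrations for this node.
	csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if len(csiNode.Spec.Drivers) == 0 {
		fmt.Println("no CSI drivers registered yet") // consistent with the TearDown failure above
	}
	for _, d := range csiNode.Spec.Drivers {
		fmt.Println("registered CSI driver:", d.Name)
	}
}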
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.189180 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:04:50.189167829 +0000 UTC m=+85.512049419 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.270913 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.271045 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.271066 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.271091 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.271108 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.374612 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.374677 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.374695 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.374720 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.374739 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.374739 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.465543 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.465677 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.465794 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.465823 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.465981 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:04:18 crc kubenswrapper[4760]: E1124 17:04:18.466153 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.479982 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.480098 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.480157 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.480182 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.480203 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.583822 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.583893 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.583911 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.583938 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.583957 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.688193 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.688265 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.688292 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.688319 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.688339 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.792694 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.792761 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.792779 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.792802 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.792819 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.894884 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.894923 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.894933 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.894946 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.894955 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.998508 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.998643 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.998665 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.998692 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:18 crc kubenswrapper[4760]: I1124 17:04:18.998720 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:18Z","lastTransitionTime":"2025-11-24T17:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.102715 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.102774 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.102808 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.102831 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.102847 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:19Z","lastTransitionTime":"2025-11-24T17:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.206356 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.206417 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.206434 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.206459 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.206477 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:19Z","lastTransitionTime":"2025-11-24T17:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.309512 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.309584 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.309601 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.309626 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.309644 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:19Z","lastTransitionTime":"2025-11-24T17:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.413345 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.413427 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.413448 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.413480 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.413504 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:19Z","lastTransitionTime":"2025-11-24T17:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.466513 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:19 crc kubenswrapper[4760]: E1124 17:04:19.466718 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.516376 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.516440 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.516457 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.516481 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.516500 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:19Z","lastTransitionTime":"2025-11-24T17:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.619563 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.619614 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.619632 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.619655 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.619678 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:19Z","lastTransitionTime":"2025-11-24T17:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.722251 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.722310 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.722327 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.722351 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.722371 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:19Z","lastTransitionTime":"2025-11-24T17:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.825191 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.825264 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.825286 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.825315 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.825341 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:19Z","lastTransitionTime":"2025-11-24T17:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.928147 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.928206 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.928226 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.928252 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:19 crc kubenswrapper[4760]: I1124 17:04:19.928270 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:19Z","lastTransitionTime":"2025-11-24T17:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.031999 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.032097 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.032115 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.032141 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.032160 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:20Z","lastTransitionTime":"2025-11-24T17:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.135398 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.135475 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.135496 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.135522 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.135541 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:20Z","lastTransitionTime":"2025-11-24T17:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.237884 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.237928 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.237939 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.237957 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.237969 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:20Z","lastTransitionTime":"2025-11-24T17:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.340589 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.340677 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.340697 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.340722 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.340741 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:20Z","lastTransitionTime":"2025-11-24T17:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.443541 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.443607 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.443621 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.443642 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.443658 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:20Z","lastTransitionTime":"2025-11-24T17:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.466051 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.466210 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.466108 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
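Editor's note: every NotReady heartbeat and pod sync failure in this window traces back to one condition: the container runtime reports NetworkReady=false because /etc/kubernetes/cni/net.d/ contains no CNI configuration yet. The sketch below reproduces just the file check for troubleshooting on the node; it is an approximation under that assumption, not the CRI-O/ocicni implementation:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// The directory named in every NetworkReady=false message above.
	const netDir = "/etc/kubernetes/cni/net.d"
	var found []string
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(netDir, pattern))
		if err == nil {
			found = append(found, matches...)
		}
	}
	if len(found) == 0 {
		fmt.Println("NetworkReady=false: no CNI configuration file in", netDir)
		os.Exit(1)
	}
	fmt.Println("CNI config present:", found)
}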
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:20 crc kubenswrapper[4760]: E1124 17:04:20.466427 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:20 crc kubenswrapper[4760]: E1124 17:04:20.466604 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.546878 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.546930 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.546952 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.546997 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.547078 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:20Z","lastTransitionTime":"2025-11-24T17:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.650367 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.650429 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.650446 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.650470 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.650490 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:20Z","lastTransitionTime":"2025-11-24T17:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.753382 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.753457 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.753476 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.753502 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.753521 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:20Z","lastTransitionTime":"2025-11-24T17:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.856775 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.856831 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.856848 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.856874 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.856897 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:20Z","lastTransitionTime":"2025-11-24T17:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.960156 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.960207 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.960226 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.960252 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:20 crc kubenswrapper[4760]: I1124 17:04:20.960271 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:20Z","lastTransitionTime":"2025-11-24T17:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.063723 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.063811 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.063833 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.063863 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.063886 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.167384 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.167459 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.167484 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.167508 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.167526 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.270389 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.270445 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.270468 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.270586 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.270618 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.376617 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.376682 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.376702 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.376730 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.376752 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.465708 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:04:21 crc kubenswrapper[4760]: E1124 17:04:21.465917 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.467379 4760 scope.go:117] "RemoveContainer" containerID="15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.482537 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.482654 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.482675 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.482704 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.482725 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
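Each setters.go entry above embeds the new node condition as plain JSON after condition=. When digging through logs like these it can help to decode that payload directly; the following self-contained Go sketch is illustrative only (the struct is hand-rolled from the fields visible in the payload, not imported from k8s.io/api, and condparse.go is just a placeholder name):

// condparse.go: decode the condition={...} payload logged by setters.go.
package main

import (
	"encoding/json"
	"fmt"
)

// nodeCondition mirrors only the fields present in the log payload.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied verbatim from the 17:04:21.376752 entry above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s (%s): %s\n", c.Type, c.Status, c.Reason, c.Message)
}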
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.585941 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.586043 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.586081 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.586109 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.586132 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.689381 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.689446 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.689463 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.689489 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.689506 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.793301 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.793378 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.793401 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.793430 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.793459 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.896526 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.896593 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.896612 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.896637 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.896654 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.922149 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/1.log"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.926469 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a"}
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.927183 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2"
Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.953190 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.974884 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.999619 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.999669 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.999688 4760 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.999714 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.999733 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:21Z","lastTransitionTime":"2025-11-24T17:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:21 crc kubenswrapper[4760]: I1124 17:04:21.999742 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:21Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.029251 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp
-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",
\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.060400 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a1760911
3ad442245a6bbfd896550d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.076654 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.088527 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.102393 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.102427 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.102441 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.102457 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.102469 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.104942 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.119209 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.133416 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 
2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.147402 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.166538 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.179701 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.194753 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.205101 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.205146 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.205161 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.205205 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.205220 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.211501 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.227839 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.241091 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.308712 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.308778 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.308798 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.308825 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.308843 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.412139 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.412214 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.412232 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.412265 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.412284 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.466141 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.466242 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:22 crc kubenswrapper[4760]: E1124 17:04:22.466302 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.466161 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:22 crc kubenswrapper[4760]: E1124 17:04:22.466467 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:22 crc kubenswrapper[4760]: E1124 17:04:22.466551 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.515200 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.515728 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.515751 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.515778 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.515796 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.622226 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.622284 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.622298 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.622320 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.622336 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.725285 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.725365 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.725389 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.725419 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.725440 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.829691 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.829776 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.829795 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.829829 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.829850 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.885936 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.886079 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.886100 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.886173 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.886194 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: E1124 17:04:22.908190 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.916959 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.917044 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.917068 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.917106 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.917127 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.932925 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/2.log" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.934437 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/1.log" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.939986 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a" exitCode=1 Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.940174 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.940301 4760 scope.go:117] "RemoveContainer" containerID="15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.940849 4760 scope.go:117] "RemoveContainer" containerID="1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a" Nov 24 17:04:22 crc kubenswrapper[4760]: E1124 17:04:22.940760 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"1
7737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: E1124 17:04:22.941061 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.946702 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.946771 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.946792 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.946818 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.946836 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.964567 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: E1124 17:04:22.968773 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.976609 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.976652 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.976670 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.976696 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.976715 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:22Z","lastTransitionTime":"2025-11-24T17:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:22 crc kubenswrapper[4760]: I1124 17:04:22.980189 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc 
kubenswrapper[4760]: I1124 17:04:22.994943 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:22 crc kubenswrapper[4760]: E1124 17:04:22.996645 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:22Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.004752 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.004809 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.004827 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.004853 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.004871 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.010723 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: E1124 17:04:23.026769 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: E1124 17:04:23.026998 4760 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.029290 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.029358 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.029376 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.029403 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.029422 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.033323 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.058888 4760 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.078143 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.091990 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.110474 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.128338 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.133142 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.133197 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.133215 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.133240 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.133258 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.147594 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.166744 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:
58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.187285 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7
462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.207199 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.226998 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.236849 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.236924 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.236942 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.236971 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.236992 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.250307 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:
03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.282405 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a1760911
3ad442245a6bbfd896550d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d5637b15e08b1e6d52ddefba7743d93566ebd19a2daecebd2c266e6ff97470\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"message\\\":\\\":311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.966417 6215 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:03:57.967170 6215 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 17:03:57.967231 6215 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 17:03:57.967277 6215 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 17:03:57.967312 6215 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1124 17:03:57.967353 6215 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 17:03:57.967383 6215 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 17:03:57.967417 6215 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 17:03:57.967465 6215 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 17:03:57.967482 6215 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 17:03:57.967531 6215 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 17:03:57.967549 6215 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 17:03:57.967548 6215 factory.go:656] Stopping watch factory\\\\nI1124 17:03:57.967564 6215 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.485527 6513 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.485785 6513 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:04:22.486040 6513 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486064 6513 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.486116 6513 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486527 6513 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 17:04:22.486551 6513 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 17:04:22.486589 6513 factory.go:656] Stopping watch 
factory\\\\nI1124 17:04:22.486608 6513 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:04:22.486639 6513 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d8
29ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.341108 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.341169 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.341192 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.341222 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.341246 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.444069 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.444157 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.444182 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.444214 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.444236 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.465921 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:23 crc kubenswrapper[4760]: E1124 17:04:23.466183 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.547065 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.547159 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.547190 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.547218 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.547269 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.650541 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.650622 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.650646 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.650676 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.650698 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.754418 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.754470 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.754487 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.754512 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.754530 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.858889 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.858949 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.858965 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.858988 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.859036 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.947528 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/2.log" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.954519 4760 scope.go:117] "RemoveContainer" containerID="1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a" Nov 24 17:04:23 crc kubenswrapper[4760]: E1124 17:04:23.954819 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.966396 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.966468 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.966495 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.966526 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.966548 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:23Z","lastTransitionTime":"2025-11-24T17:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.975728 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:23 crc kubenswrapper[4760]: I1124 17:04:23.994904 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:04:23Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.012952 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.028838 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.048502 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.070204 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.070270 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.070292 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.070327 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.070346 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:24Z","lastTransitionTime":"2025-11-24T17:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.072523 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.094839 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.113855 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.133453 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:
58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.155983 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92ed
af5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.174446 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.174523 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.174539 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.174566 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.174589 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:24Z","lastTransitionTime":"2025-11-24T17:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.175646 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.194159 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.226460 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.485527 6513 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.485785 6513 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:04:22.486040 6513 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486064 6513 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.486116 6513 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486527 6513 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 17:04:22.486551 6513 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 17:04:22.486589 6513 factory.go:656] Stopping watch factory\\\\nI1124 17:04:22.486608 6513 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:04:22.486639 6513 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.250588 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.271146 4760 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.277793 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.277854 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:24 crc kubenswrapper[4760]: 
I1124 17:04:24.277874 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.277901 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.277920 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:24Z","lastTransitionTime":"2025-11-24T17:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.297728 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\
\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.321534 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8
dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:24Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.381692 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.381761 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.381779 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.381811 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.381832 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:24Z","lastTransitionTime":"2025-11-24T17:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.465454 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.465554 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:04:24 crc kubenswrapper[4760]: E1124 17:04:24.465740 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.466487 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:04:24 crc kubenswrapper[4760]: E1124 17:04:24.466748 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:04:24 crc kubenswrapper[4760]: E1124 17:04:24.466920 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.484958 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.485065 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.485098 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.485124 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.485142 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:24Z","lastTransitionTime":"2025-11-24T17:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.588346 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.588422 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.588441 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.588467 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.588490 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:24Z","lastTransitionTime":"2025-11-24T17:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.692318 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.692395 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.692421 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.692449 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.692474 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:24Z","lastTransitionTime":"2025-11-24T17:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.795525 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.795583 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.795603 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.795629 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.795687 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:24Z","lastTransitionTime":"2025-11-24T17:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.898596 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.898646 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.898656 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.898674 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:24 crc kubenswrapper[4760]: I1124 17:04:24.898686 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:24Z","lastTransitionTime":"2025-11-24T17:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.001242 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.001320 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.001339 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.001366 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.001386 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:25Z","lastTransitionTime":"2025-11-24T17:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.105097 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.105155 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.105171 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.105196 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.105216 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:25Z","lastTransitionTime":"2025-11-24T17:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.207799 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.207867 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.207885 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.207911 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.207930 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:25Z","lastTransitionTime":"2025-11-24T17:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.310457 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.310516 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.310534 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.310561 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.310580 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:25Z","lastTransitionTime":"2025-11-24T17:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.413826 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.413914 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.413939 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.413967 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.413990 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:25Z","lastTransitionTime":"2025-11-24T17:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.465548 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:04:25 crc kubenswrapper[4760]: E1124 17:04:25.465720 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.489733 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.506063 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.517361 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.517421 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.517440 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.517464 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.517482 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:25Z","lastTransitionTime":"2025-11-24T17:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.525376 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.544204 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.570857 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.594594 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.614378 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.621557 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.621615 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.621644 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.621676 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.621697 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:25Z","lastTransitionTime":"2025-11-24T17:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.632228 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.654838 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.675109 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.694452 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.712178 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.725128 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.725188 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.725207 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.725231 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.725250 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:25Z","lastTransitionTime":"2025-11-24T17:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.733120 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.753498 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.774295 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.801218 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.829359 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.829433 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.829453 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.829479 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.829497 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:25Z","lastTransitionTime":"2025-11-24T17:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.838604 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a1760911
3ad442245a6bbfd896550d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.485527 6513 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.485785 6513 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:04:22.486040 6513 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486064 6513 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.486116 6513 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486527 6513 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 17:04:22.486551 6513 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 17:04:22.486589 6513 factory.go:656] Stopping watch factory\\\\nI1124 17:04:22.486608 6513 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:04:22.486639 6513 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:25Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.933699 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.933816 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.933843 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.933869 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:25 crc kubenswrapper[4760]: I1124 17:04:25.933887 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:25Z","lastTransitionTime":"2025-11-24T17:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.036774 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.036830 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.036849 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.036876 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.036894 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:26Z","lastTransitionTime":"2025-11-24T17:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.140400 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.140469 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.140496 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.140525 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.140548 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:26Z","lastTransitionTime":"2025-11-24T17:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.243250 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.243332 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.243359 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.243393 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.243419 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:26Z","lastTransitionTime":"2025-11-24T17:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.346553 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.346618 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.346646 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.346676 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.346694 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:26Z","lastTransitionTime":"2025-11-24T17:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.450303 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.450372 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.450388 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.450413 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.450430 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:26Z","lastTransitionTime":"2025-11-24T17:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.465819 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.465945 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.465983 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:26 crc kubenswrapper[4760]: E1124 17:04:26.466177 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:26 crc kubenswrapper[4760]: E1124 17:04:26.466341 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:26 crc kubenswrapper[4760]: E1124 17:04:26.466460 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.554603 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.554674 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.554691 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.554718 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.554737 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:26Z","lastTransitionTime":"2025-11-24T17:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.658633 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.658707 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.658726 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.658758 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.658778 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:26Z","lastTransitionTime":"2025-11-24T17:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.762114 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.762164 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.762178 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.762199 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.762222 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:26Z","lastTransitionTime":"2025-11-24T17:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.866556 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.866636 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.866650 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.866671 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.866684 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:26Z","lastTransitionTime":"2025-11-24T17:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.969645 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.969779 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.969806 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.969839 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:26 crc kubenswrapper[4760]: I1124 17:04:26.969865 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:26Z","lastTransitionTime":"2025-11-24T17:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.072993 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.073091 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.073109 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.073135 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.073154 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:27Z","lastTransitionTime":"2025-11-24T17:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.177447 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.177544 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.177571 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.177609 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.177635 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:27Z","lastTransitionTime":"2025-11-24T17:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.281201 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.281275 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.281292 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.281318 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.281335 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:27Z","lastTransitionTime":"2025-11-24T17:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.384779 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.384836 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.384854 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.384882 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.384900 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:27Z","lastTransitionTime":"2025-11-24T17:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.465539 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:27 crc kubenswrapper[4760]: E1124 17:04:27.465835 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.487800 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.487857 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.487875 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.487897 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.487916 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:27Z","lastTransitionTime":"2025-11-24T17:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.591516 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.591578 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.591595 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.591619 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.591638 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:27Z","lastTransitionTime":"2025-11-24T17:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.695484 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.695550 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.695568 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.695592 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.695611 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:27Z","lastTransitionTime":"2025-11-24T17:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.798988 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.799057 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.799070 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.799090 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.799102 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:27Z","lastTransitionTime":"2025-11-24T17:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.902176 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.902233 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.902242 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.902260 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:27 crc kubenswrapper[4760]: I1124 17:04:27.902270 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:27Z","lastTransitionTime":"2025-11-24T17:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.005196 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.005237 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.005245 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.005263 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.005275 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:28Z","lastTransitionTime":"2025-11-24T17:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.108780 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.108846 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.108857 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.108878 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.108891 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:28Z","lastTransitionTime":"2025-11-24T17:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.211226 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.211281 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.211292 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.211313 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.211325 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:28Z","lastTransitionTime":"2025-11-24T17:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.314616 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.314672 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.314690 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.314714 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.314727 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:28Z","lastTransitionTime":"2025-11-24T17:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.418175 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.418226 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.418243 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.418264 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.418278 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:28Z","lastTransitionTime":"2025-11-24T17:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.466042 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.466059 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.466260 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:28 crc kubenswrapper[4760]: E1124 17:04:28.466389 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:28 crc kubenswrapper[4760]: E1124 17:04:28.466520 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:28 crc kubenswrapper[4760]: E1124 17:04:28.466715 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.520643 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.520720 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.520749 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.520786 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.520816 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:28Z","lastTransitionTime":"2025-11-24T17:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.623231 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.623291 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.623314 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.623336 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.623354 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:28Z","lastTransitionTime":"2025-11-24T17:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.727187 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.727250 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.727263 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.727289 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.727304 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:28Z","lastTransitionTime":"2025-11-24T17:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.835986 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.836095 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.836119 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.836151 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.836176 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:28Z","lastTransitionTime":"2025-11-24T17:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.939293 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.939353 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.939369 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.939392 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:28 crc kubenswrapper[4760]: I1124 17:04:28.939439 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:28Z","lastTransitionTime":"2025-11-24T17:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.042362 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.042416 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.042427 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.042442 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.042454 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:29Z","lastTransitionTime":"2025-11-24T17:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.145125 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.145179 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.145196 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.145216 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.145233 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:29Z","lastTransitionTime":"2025-11-24T17:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.247786 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.247865 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.247883 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.247910 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.247929 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:29Z","lastTransitionTime":"2025-11-24T17:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.350625 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.350694 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.350712 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.350737 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.350757 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:29Z","lastTransitionTime":"2025-11-24T17:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.454342 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.454417 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.454437 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.454467 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.454488 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:29Z","lastTransitionTime":"2025-11-24T17:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.465821 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:29 crc kubenswrapper[4760]: E1124 17:04:29.466001 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.556982 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.557075 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.557095 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.557155 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.557174 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:29Z","lastTransitionTime":"2025-11-24T17:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.660872 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.660943 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.660961 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.660993 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.661034 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:29Z","lastTransitionTime":"2025-11-24T17:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.764386 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.764440 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.764452 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.764471 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.764487 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:29Z","lastTransitionTime":"2025-11-24T17:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.868149 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.868224 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.868245 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.868274 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.868294 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:29Z","lastTransitionTime":"2025-11-24T17:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.971460 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.971735 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.971750 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.971773 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:29 crc kubenswrapper[4760]: I1124 17:04:29.971787 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:29Z","lastTransitionTime":"2025-11-24T17:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.074775 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.074850 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.075088 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.075109 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.075122 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:30Z","lastTransitionTime":"2025-11-24T17:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.178348 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.178407 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.178417 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.178444 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.178455 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:30Z","lastTransitionTime":"2025-11-24T17:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.282148 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.282195 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.282204 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.282221 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.282234 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:30Z","lastTransitionTime":"2025-11-24T17:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.385107 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.385168 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.385184 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.385210 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.385230 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:30Z","lastTransitionTime":"2025-11-24T17:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.466069 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.466078 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.466329 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:30 crc kubenswrapper[4760]: E1124 17:04:30.466479 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:30 crc kubenswrapper[4760]: E1124 17:04:30.466576 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:30 crc kubenswrapper[4760]: E1124 17:04:30.466980 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.478986 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.487669 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.487723 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.487741 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.487770 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.487795 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:30Z","lastTransitionTime":"2025-11-24T17:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.590461 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.590533 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.590552 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.590578 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.590598 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:30Z","lastTransitionTime":"2025-11-24T17:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.693829 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.693901 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.693912 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.693929 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.693944 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:30Z","lastTransitionTime":"2025-11-24T17:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.796903 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.796942 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.796955 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.796973 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.796989 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:30Z","lastTransitionTime":"2025-11-24T17:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.899954 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.900096 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.900118 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.900145 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:30 crc kubenswrapper[4760]: I1124 17:04:30.900163 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:30Z","lastTransitionTime":"2025-11-24T17:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.002541 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.002595 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.002606 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.002619 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.002628 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.105888 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.105944 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.105958 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.105983 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.106028 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.210250 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.210302 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.210312 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.210331 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.210343 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.210343 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.313569 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.313616 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.313625 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.313642 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.313653 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.416670 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.416726 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.416737 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.416765 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.416774 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.465361 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:04:31 crc kubenswrapper[4760]: E1124 17:04:31.465638 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.521265 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.521301 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.521312 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.521331 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.521342 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.626315 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.626384 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.626402 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.626428 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.626448 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.729280 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.729359 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.729387 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.729421 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.729444 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.833064 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.833139 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.833160 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.833189 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.833207 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.941262 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.941321 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.941339 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.941364 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:31 crc kubenswrapper[4760]: I1124 17:04:31.941382 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:31Z","lastTransitionTime":"2025-11-24T17:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.046551 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.046648 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.046673 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.046715 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.046743 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:32Z","lastTransitionTime":"2025-11-24T17:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.153102 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.153171 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.153191 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.153216 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.153235 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:32Z","lastTransitionTime":"2025-11-24T17:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.257467 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.257553 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.257579 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.257613 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.257646 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:32Z","lastTransitionTime":"2025-11-24T17:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.361399 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.361471 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.361490 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.361522 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.361539 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:32Z","lastTransitionTime":"2025-11-24T17:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.463434 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.463495 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.463508 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.463532 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.463548 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:32Z","lastTransitionTime":"2025-11-24T17:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.466133 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.466133 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.466178 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:04:32 crc kubenswrapper[4760]: E1124 17:04:32.466474 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:04:32 crc kubenswrapper[4760]: E1124 17:04:32.466635 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:04:32 crc kubenswrapper[4760]: E1124 17:04:32.466782 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.565799 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.565880 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.565908 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.565941 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.565961 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:32Z","lastTransitionTime":"2025-11-24T17:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.669591 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.669671 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.669688 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.669716 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.669735 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:32Z","lastTransitionTime":"2025-11-24T17:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.766098 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:04:32 crc kubenswrapper[4760]: E1124 17:04:32.766384 4760 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 17:04:32 crc kubenswrapper[4760]: E1124 17:04:32.766479 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs podName:e462626d-5645-4be7-89b4-383a4cde08f9 nodeName:}" failed. No retries permitted until 2025-11-24 17:05:04.766453076 +0000 UTC m=+100.089334656 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs") pod "network-metrics-daemon-dz6vg" (UID: "e462626d-5645-4be7-89b4-383a4cde08f9") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.772514 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.772594 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.772618 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.772651 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
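[Editor's illustration] The nestedpendingoperations entry above shows the kubelet's per-volume retry backoff: the failed metrics-certs mount may not be retried for 32s (until 17:05:04). A 32s delay is consistent with a doubling schedule; the 500ms initial delay and roughly two-minute cap in the sketch below are assumptions about the defaults, not values taken from this log:

    // backoff.go - illustrative: a capped doubling schedule consistent with the
    // durationBeforeRetry values the kubelet logs for failed volume mounts.
    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay := 500 * time.Millisecond        // assumed initial delay
        maxDelay := 2*time.Minute + 2*time.Second // assumed cap
        for attempt := 1; attempt <= 10; attempt++ {
            fmt.Printf("after failure %2d: durationBeforeRetry %v\n", attempt, delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }

Under these assumptions, the seventh consecutive failure yields exactly 32s, matching the durationBeforeRetry logged above.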
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.772677 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:32Z","lastTransitionTime":"2025-11-24T17:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.875358 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.875424 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.875444 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.875473 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.875496 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:32Z","lastTransitionTime":"2025-11-24T17:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.977735 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.977810 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.977827 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.977857 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:32 crc kubenswrapper[4760]: I1124 17:04:32.977879 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:32Z","lastTransitionTime":"2025-11-24T17:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.081383 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.081453 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.081475 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.081505 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.081527 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.184347 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.184414 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.184437 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.184467 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.184488 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.286419 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.286470 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.286484 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.286520 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.286534 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.328428 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.328488 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.328498 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.328520 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.328534 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:33 crc kubenswrapper[4760]: E1124 17:04:33.347068 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:33Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.351549 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.351590 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.351604 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.351624 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.351638 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:33 crc kubenswrapper[4760]: E1124 17:04:33.367099 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:33Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.370956 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.370984 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.370995 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.371026 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.371037 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:33 crc kubenswrapper[4760]: E1124 17:04:33.386167 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:33Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.390322 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.390407 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.390464 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.390551 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.390622 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:33 crc kubenswrapper[4760]: E1124 17:04:33.404745 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:33Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.408950 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.408996 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.409052 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.409084 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.409103 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:33 crc kubenswrapper[4760]: E1124 17:04:33.431262 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:33Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:33 crc kubenswrapper[4760]: E1124 17:04:33.431678 4760 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.433652 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.433713 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.433724 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.433741 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.433754 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.467269 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:33 crc kubenswrapper[4760]: E1124 17:04:33.467462 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.537172 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.537214 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.537225 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.537241 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.537251 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.641064 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.641118 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.641133 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.641155 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.641171 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.745045 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.745593 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.745865 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.746110 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.746261 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.849857 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.849925 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.849943 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.849968 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.849987 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.952990 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.953091 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.953118 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.953150 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:33 crc kubenswrapper[4760]: I1124 17:04:33.953175 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:33Z","lastTransitionTime":"2025-11-24T17:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.055900 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.055973 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.055990 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.056029 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.056044 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:34Z","lastTransitionTime":"2025-11-24T17:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.158909 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.158947 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.158958 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.158972 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.158984 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:34Z","lastTransitionTime":"2025-11-24T17:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.262143 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.262201 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.262213 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.262230 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.262240 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:34Z","lastTransitionTime":"2025-11-24T17:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.364580 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.364823 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.364889 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.364958 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.365211 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:34Z","lastTransitionTime":"2025-11-24T17:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.466324 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.466393 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:34 crc kubenswrapper[4760]: E1124 17:04:34.466576 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:34 crc kubenswrapper[4760]: E1124 17:04:34.466599 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.466965 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:34 crc kubenswrapper[4760]: E1124 17:04:34.467593 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.468698 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.468746 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.468763 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.468785 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.468803 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:34Z","lastTransitionTime":"2025-11-24T17:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.572020 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.572082 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.572100 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.572121 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.572136 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:34Z","lastTransitionTime":"2025-11-24T17:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.675578 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.675641 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.675654 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.675678 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.675693 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:34Z","lastTransitionTime":"2025-11-24T17:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.777952 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.778033 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.778050 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.778073 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.778091 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:34Z","lastTransitionTime":"2025-11-24T17:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.880186 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.880453 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.880595 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.880760 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.880901 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:34Z","lastTransitionTime":"2025-11-24T17:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.983799 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.983834 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.983842 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.983857 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:34 crc kubenswrapper[4760]: I1124 17:04:34.983866 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:34Z","lastTransitionTime":"2025-11-24T17:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.000586 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/0.log" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.000630 4760 generic.go:334] "Generic (PLEG): container finished" podID="ea01e72c-3c1c-465f-a4cb-90eb34c2f871" containerID="ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f" exitCode=1 Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.000656 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8x59s" event={"ID":"ea01e72c-3c1c-465f-a4cb-90eb34c2f871","Type":"ContainerDied","Data":"ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f"} Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.000975 4760 scope.go:117] "RemoveContainer" containerID="ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.017588 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":
{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.037065 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.052181 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:34Z\\\",\\\"message\\\":\\\"2025-11-24T17:03:48+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331\\\\n2025-11-24T17:03:48+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331 to /host/opt/cni/bin/\\\\n2025-11-24T17:03:48Z [verbose] multus-daemon started\\\\n2025-11-24T17:03:48Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:04:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.067925 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.098376 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.098428 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:35 crc 
kubenswrapper[4760]: I1124 17:04:35.098437 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.098453 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.098462 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:35Z","lastTransitionTime":"2025-11-24T17:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.100615 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a1760911
3ad442245a6bbfd896550d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.485527 6513 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.485785 6513 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:04:22.486040 6513 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486064 6513 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.486116 6513 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486527 6513 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 17:04:22.486551 6513 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 17:04:22.486589 6513 factory.go:656] Stopping watch factory\\\\nI1124 17:04:22.486608 6513 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:04:22.486639 6513 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.113102 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f9392f9-adab-4822-a67d-5d2dfb53d4d0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33b318e984a01d81c7f58a0baacf4008d87b91864854245c6187122e9666c4f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d48
1616d67fc2235f8faa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.130691 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.143184 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.159250 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.172464 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.192041 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 
2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.200646 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.200679 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.200688 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.200703 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.200714 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:35Z","lastTransitionTime":"2025-11-24T17:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.211861 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.229957 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.246988 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.268916 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.290465 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.302958 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.303024 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.303038 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.303056 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.303068 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:35Z","lastTransitionTime":"2025-11-24T17:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.309869 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.327695 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:
58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.406340 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.406400 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.406423 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.406454 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.406477 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:35Z","lastTransitionTime":"2025-11-24T17:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.466410 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:35 crc kubenswrapper[4760]: E1124 17:04:35.466616 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.487560 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.503804 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.508831 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.508856 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.508867 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.508881 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.508891 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:35Z","lastTransitionTime":"2025-11-24T17:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.519577 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.533351 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f9392f9-adab-4822-a67d-5d2dfb53d4d0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33b318e984a01d81c7f58a0baacf4008d87b91864854245c6187122e9666c4f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.552204 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.585608 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.600211 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.611835 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.611889 4760 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.611910 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.611934 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.611953 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:35Z","lastTransitionTime":"2025-11-24T17:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.615211 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.633564 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes
/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.651400 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.671477 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.699721 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.715503 4760 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.715563 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.715580 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.715605 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.715632 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:35Z","lastTransitionTime":"2025-11-24T17:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.716879 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.734636 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.754370 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:34Z\\\",\\\"message\\\":\\\"2025-11-24T17:03:48+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331\\\\n2025-11-24T17:03:48+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331 to /host/opt/cni/bin/\\\\n2025-11-24T17:03:48Z [verbose] multus-daemon started\\\\n2025-11-24T17:03:48Z 
[verbose] Readiness Indicator file check\\\\n2025-11-24T17:04:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.776576 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.799232 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.485527 6513 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.485785 6513 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:04:22.486040 6513 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486064 6513 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.486116 6513 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486527 6513 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 17:04:22.486551 6513 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 17:04:22.486589 6513 factory.go:656] Stopping watch factory\\\\nI1124 17:04:22.486608 6513 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:04:22.486639 6513 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.816870 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:35Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.818134 4760 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.818392 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.818612 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.818839 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.819085 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:35Z","lastTransitionTime":"2025-11-24T17:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.923953 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.924026 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.924037 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.924053 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:35 crc kubenswrapper[4760]: I1124 17:04:35.924064 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:35Z","lastTransitionTime":"2025-11-24T17:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.009236 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/0.log"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.009342 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8x59s" event={"ID":"ea01e72c-3c1c-465f-a4cb-90eb34c2f871","Type":"ContainerStarted","Data":"00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea"}
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.028804 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.028869 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.028889 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.028916 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.028942 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:36Z","lastTransitionTime":"2025-11-24T17:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.030206 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.053739 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 
2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.072818 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.089696 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.118339 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.132860 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.132919 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.132935 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.132958 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.132977 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:36Z","lastTransitionTime":"2025-11-24T17:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready:
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.146203 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.169776 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.190520 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.202329 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0
b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.212866 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha2
56:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.225802 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.235791 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.235840 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.235857 4760 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.235878 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.235891 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:36Z","lastTransitionTime":"2025-11-24T17:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.242947 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:34Z\\\",\\\"message\\\":\\\"2025-11-24T17:03:48+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331\\\\n2025-11-24T17:03:48+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331 to /host/opt/cni/bin/\\\\n2025-11-24T17:03:48Z [verbose] multus-daemon started\\\\n2025-11-24T17:03:48Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:04:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf.
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.257666 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.293139 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.485527 6513 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.485785 6513 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:04:22.486040 6513 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486064 6513 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.486116 6513 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486527 6513 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 17:04:22.486551 6513 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 17:04:22.486589 6513 factory.go:656] Stopping watch factory\\\\nI1124 17:04:22.486608 6513 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:04:22.486639 6513 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.305220 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f9392f9-adab-4822-a67d-5d2dfb53d4d0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33b318e984a01d81c7f58a0baacf4008d87b91864854245c6187122e9666c4f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d48
1616d67fc2235f8faa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.321414 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.335421 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.338493 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.338621 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.338714 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.338809 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.338895 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:36Z","lastTransitionTime":"2025-11-24T17:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.351248 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:36Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.442116 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.442168 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.442181 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.442200 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.442213 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:36Z","lastTransitionTime":"2025-11-24T17:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.465581 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.465618 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:36 crc kubenswrapper[4760]: E1124 17:04:36.465716 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:36 crc kubenswrapper[4760]: E1124 17:04:36.465830 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.466437 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:36 crc kubenswrapper[4760]: E1124 17:04:36.466891 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.545374 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.545427 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.545441 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.545462 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.545478 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:36Z","lastTransitionTime":"2025-11-24T17:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.648718 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.648753 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.648776 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.648790 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.648989 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:36Z","lastTransitionTime":"2025-11-24T17:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.751780 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.752179 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.752866 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.752968 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.753091 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:36Z","lastTransitionTime":"2025-11-24T17:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.855848 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.855934 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.855952 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.855976 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.855994 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:36Z","lastTransitionTime":"2025-11-24T17:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.978742 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.978808 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.978831 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.978859 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:36 crc kubenswrapper[4760]: I1124 17:04:36.978880 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:36Z","lastTransitionTime":"2025-11-24T17:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.082070 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.082125 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.082138 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.082162 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.082177 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:37Z","lastTransitionTime":"2025-11-24T17:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.185043 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.185085 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.185098 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.185116 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.185127 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:37Z","lastTransitionTime":"2025-11-24T17:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.288282 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.288335 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.288350 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.288372 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.288389 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:37Z","lastTransitionTime":"2025-11-24T17:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.391482 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.391528 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.391536 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.391552 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.391561 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:37Z","lastTransitionTime":"2025-11-24T17:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.465412 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:37 crc kubenswrapper[4760]: E1124 17:04:37.465583 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.494019 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.494073 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.494083 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.494102 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.494112 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:37Z","lastTransitionTime":"2025-11-24T17:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.597795 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.598296 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.598308 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.598325 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.598336 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:37Z","lastTransitionTime":"2025-11-24T17:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.701343 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.701451 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.701472 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.701493 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.701506 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:37Z","lastTransitionTime":"2025-11-24T17:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.804528 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.804606 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.804631 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.804662 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.804690 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:37Z","lastTransitionTime":"2025-11-24T17:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.908669 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.908746 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.908765 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.908788 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:37 crc kubenswrapper[4760]: I1124 17:04:37.908808 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:37Z","lastTransitionTime":"2025-11-24T17:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.011868 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.011947 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.011970 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.012029 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.012095 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:38Z","lastTransitionTime":"2025-11-24T17:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.115592 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.115658 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.115676 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.115706 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.115724 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:38Z","lastTransitionTime":"2025-11-24T17:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.219442 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.219498 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.219508 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.219524 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.219537 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:38Z","lastTransitionTime":"2025-11-24T17:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.323186 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.323254 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.323271 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.323299 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.323318 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:38Z","lastTransitionTime":"2025-11-24T17:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.426578 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.426627 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.426638 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.426657 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.426668 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:38Z","lastTransitionTime":"2025-11-24T17:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.465665 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.465753 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.465750 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:38 crc kubenswrapper[4760]: E1124 17:04:38.465836 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:38 crc kubenswrapper[4760]: E1124 17:04:38.465999 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:38 crc kubenswrapper[4760]: E1124 17:04:38.466174 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.467071 4760 scope.go:117] "RemoveContainer" containerID="1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a" Nov 24 17:04:38 crc kubenswrapper[4760]: E1124 17:04:38.467268 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.530652 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.531286 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.531649 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.531801 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.531925 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:38Z","lastTransitionTime":"2025-11-24T17:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.635616 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.635975 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.636279 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.636534 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.636727 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:38Z","lastTransitionTime":"2025-11-24T17:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.741214 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.741259 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.741269 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.741287 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.741297 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:38Z","lastTransitionTime":"2025-11-24T17:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.843692 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.843756 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.843773 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.843799 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.843817 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:38Z","lastTransitionTime":"2025-11-24T17:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.947274 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.947360 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.947379 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.947408 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:38 crc kubenswrapper[4760]: I1124 17:04:38.947428 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:38Z","lastTransitionTime":"2025-11-24T17:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.051393 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.051827 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.052058 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.052451 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.052767 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:39Z","lastTransitionTime":"2025-11-24T17:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.156894 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.157956 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.158186 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.158392 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.158593 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:39Z","lastTransitionTime":"2025-11-24T17:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.262134 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.262515 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.262585 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.262655 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.262718 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:39Z","lastTransitionTime":"2025-11-24T17:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.365732 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.365827 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.365843 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.365863 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.365876 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:39Z","lastTransitionTime":"2025-11-24T17:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.466084 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:39 crc kubenswrapper[4760]: E1124 17:04:39.466257 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.468838 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.469109 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.469280 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.469443 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.469597 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:39Z","lastTransitionTime":"2025-11-24T17:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.573997 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.574107 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.574127 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.574159 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.574180 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:39Z","lastTransitionTime":"2025-11-24T17:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.678449 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.678509 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.678526 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.678550 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.678567 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:39Z","lastTransitionTime":"2025-11-24T17:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.781219 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.781568 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.781674 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.781782 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.781869 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:39Z","lastTransitionTime":"2025-11-24T17:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.884612 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.884957 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.885099 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.885214 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.885310 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:39Z","lastTransitionTime":"2025-11-24T17:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.989456 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.989559 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.989588 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.989624 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:39 crc kubenswrapper[4760]: I1124 17:04:39.989661 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:39Z","lastTransitionTime":"2025-11-24T17:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.092801 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.092843 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.092856 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.092873 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.092907 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:40Z","lastTransitionTime":"2025-11-24T17:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.196782 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.196836 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.196856 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.196883 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.196902 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:40Z","lastTransitionTime":"2025-11-24T17:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.300174 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.300262 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.300277 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.300302 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.300320 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:40Z","lastTransitionTime":"2025-11-24T17:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.402604 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.402686 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.402713 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.402748 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.402772 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:40Z","lastTransitionTime":"2025-11-24T17:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.466482 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.466549 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.466614 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:40 crc kubenswrapper[4760]: E1124 17:04:40.466699 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:40 crc kubenswrapper[4760]: E1124 17:04:40.467061 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:40 crc kubenswrapper[4760]: E1124 17:04:40.466888 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.506178 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.506252 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.506279 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.506310 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.506334 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:40Z","lastTransitionTime":"2025-11-24T17:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.609627 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.610179 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.610318 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.610456 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.610718 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:40Z","lastTransitionTime":"2025-11-24T17:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.714800 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.714880 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.714897 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.714926 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.714946 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:40Z","lastTransitionTime":"2025-11-24T17:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.818059 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.818123 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.818144 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.818171 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.818264 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:40Z","lastTransitionTime":"2025-11-24T17:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.921343 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.921413 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.921433 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.921458 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:40 crc kubenswrapper[4760]: I1124 17:04:40.921477 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:40Z","lastTransitionTime":"2025-11-24T17:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.024492 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.024560 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.024582 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.024776 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.024845 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:41Z","lastTransitionTime":"2025-11-24T17:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.128095 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.128159 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.128178 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.128203 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.128224 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:41Z","lastTransitionTime":"2025-11-24T17:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.231653 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.231723 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.231740 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.231765 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.231785 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:41Z","lastTransitionTime":"2025-11-24T17:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.335092 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.335160 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.335178 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.335202 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.335221 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:41Z","lastTransitionTime":"2025-11-24T17:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.438538 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.438593 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.438613 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.438638 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.438657 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:41Z","lastTransitionTime":"2025-11-24T17:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.466088 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:41 crc kubenswrapper[4760]: E1124 17:04:41.466297 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.541935 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.542047 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.542076 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.542107 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.542131 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:41Z","lastTransitionTime":"2025-11-24T17:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.646259 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.646607 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.646789 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.646940 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.647117 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:41Z","lastTransitionTime":"2025-11-24T17:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.751230 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.751675 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.751977 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.752262 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.752457 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:41Z","lastTransitionTime":"2025-11-24T17:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.857199 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.857691 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.857986 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.858505 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.858957 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:41Z","lastTransitionTime":"2025-11-24T17:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.963103 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.963752 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.963928 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.964141 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:41 crc kubenswrapper[4760]: I1124 17:04:41.964284 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:41Z","lastTransitionTime":"2025-11-24T17:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.067841 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.067902 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.067922 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.067950 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.067969 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:42Z","lastTransitionTime":"2025-11-24T17:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.176101 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.176159 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.176176 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.176200 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.176218 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:42Z","lastTransitionTime":"2025-11-24T17:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.279726 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.280723 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.280888 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.281074 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.281229 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:42Z","lastTransitionTime":"2025-11-24T17:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.465436 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:04:42 crc kubenswrapper[4760]: E1124 17:04:42.465638 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.465793 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:04:42 crc kubenswrapper[4760]: I1124 17:04:42.465826 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:04:42 crc kubenswrapper[4760]: E1124 17:04:42.466091 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9"
Nov 24 17:04:42 crc kubenswrapper[4760]: E1124 17:04:42.466160 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
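All three pod syncs fail for the same reason the node is NotReady: the runtime finds no CNI configuration in /etc/kubernetes/cni/net.d/. A quick look at that directory from the node confirms whether the network provider has written anything yet; a minimal sketch, assuming Python 3 on the node and the conventional .conf/.conflist/.json CNI config extensions:

```python
from pathlib import Path

# Directory the kubelet names in the NetworkPluginNotReady message above.
CNI_CONF_DIR = Path("/etc/kubernetes/cni/net.d")

def cni_configs(conf_dir: Path = CNI_CONF_DIR):
    """Return the CNI config files a runtime would consider (.conf/.conflist/.json)."""
    if not conf_dir.is_dir():
        return []
    return sorted(p for p in conf_dir.iterdir() if p.suffix in {".conf", ".conflist", ".json"})

if __name__ == "__main__":
    found = cni_configs()
    if found:
        for p in found:
            print("found CNI config:", p)
    else:
        print(f"no CNI configuration file in {CNI_CONF_DIR} - network plugin not ready")
```

An empty result matches the kubelet's complaint exactly; the errors should clear on their own once the network provider drops a config file into that directory.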
Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.466485 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:04:43 crc kubenswrapper[4760]: E1124 17:04:43.466681 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:04:43 crc kubenswrapper[4760]: E1124 17:04:43.725161 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:43Z is after 2025-08-24T17:21:41Z"
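The patch itself is well-formed; it is rejected because the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a TLS certificate whose notAfter (2025-08-24T17:21:41Z) is months behind the node clock (2025-11-24T17:04:43Z). One way to confirm the certificate window from the node is to fetch the presented certificate without verification (verification is exactly what fails in the log) and inspect its validity dates; a minimal sketch, assuming Python 3 plus the third-party cryptography package (>= 42 for the *_utc accessors), with host and port taken from the Post URL in the error:

```python
import ssl
from datetime import datetime, timezone

from cryptography import x509  # third-party: pip install cryptography

HOST, PORT = "127.0.0.1", 9743  # webhook endpoint from the Post URL above

# ssl.get_server_certificate does not validate the peer by default, so it
# returns the PEM even when the certificate is already expired.
pem = ssl.get_server_certificate((HOST, PORT))
cert = x509.load_pem_x509_certificate(pem.encode())

now = datetime.now(timezone.utc)
print("notBefore:", cert.not_valid_before_utc)
print("notAfter: ", cert.not_valid_after_utc)
print("expired:  ", now > cert.not_valid_after_utc)
```

If notAfter matches the 2025-08-24T17:21:41Z in the error, the node status patches will keep failing until that certificate is rotated, regardless of the CNI issue above.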
event="NodeHasNoDiskPressure" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.730946 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.730980 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.731038 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:43Z","lastTransitionTime":"2025-11-24T17:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:43 crc kubenswrapper[4760]: E1124 17:04:43.750819 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:43Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.755744 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.755795 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.755811 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.755835 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.755856 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:43Z","lastTransitionTime":"2025-11-24T17:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:43 crc kubenswrapper[4760]: E1124 17:04:43.775414 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:43Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.780207 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.780265 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.780283 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.780307 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.780326 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:43Z","lastTransitionTime":"2025-11-24T17:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:43 crc kubenswrapper[4760]: E1124 17:04:43.798659 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:43Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.804246 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.804300 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.804310 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.804332 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.804346 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:43Z","lastTransitionTime":"2025-11-24T17:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:43 crc kubenswrapper[4760]: E1124 17:04:43.821558 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:43Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:43 crc kubenswrapper[4760]: E1124 17:04:43.821796 4760 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.823905 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.823989 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.824040 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.824071 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.824089 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:43Z","lastTransitionTime":"2025-11-24T17:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.927166 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.927226 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.927246 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.927272 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:43 crc kubenswrapper[4760]: I1124 17:04:43.927291 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:43Z","lastTransitionTime":"2025-11-24T17:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.030715 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.030776 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.030799 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.030826 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.030843 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:44Z","lastTransitionTime":"2025-11-24T17:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.134087 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.134151 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.134169 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.134194 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.134213 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:44Z","lastTransitionTime":"2025-11-24T17:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.237588 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.237672 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.237693 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.237719 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.237739 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:44Z","lastTransitionTime":"2025-11-24T17:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.341226 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.341429 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.341497 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.341531 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.341563 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:44Z","lastTransitionTime":"2025-11-24T17:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.444998 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.445112 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.445137 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.445172 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.445194 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:44Z","lastTransitionTime":"2025-11-24T17:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.465690 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:44 crc kubenswrapper[4760]: E1124 17:04:44.465974 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.465712 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:44 crc kubenswrapper[4760]: E1124 17:04:44.466179 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.465690 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:44 crc kubenswrapper[4760]: E1124 17:04:44.466322 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.548690 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.548759 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.548777 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.548803 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.548821 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:44Z","lastTransitionTime":"2025-11-24T17:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.652266 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.652337 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.652359 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.652389 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.652411 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:44Z","lastTransitionTime":"2025-11-24T17:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.757800 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.757860 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.757881 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.757909 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.757930 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:44Z","lastTransitionTime":"2025-11-24T17:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.860982 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.861049 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.861062 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.861082 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.861097 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:44Z","lastTransitionTime":"2025-11-24T17:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.964284 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.964401 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.964422 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.964447 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:44 crc kubenswrapper[4760]: I1124 17:04:44.964467 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:44Z","lastTransitionTime":"2025-11-24T17:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.067198 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.067530 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.067640 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.067765 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.067861 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:45Z","lastTransitionTime":"2025-11-24T17:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.171313 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.171550 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.171702 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.171810 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.171896 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:45Z","lastTransitionTime":"2025-11-24T17:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.275283 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.275342 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.275359 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.275382 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.275396 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:45Z","lastTransitionTime":"2025-11-24T17:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.378549 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.378602 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.378620 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.378646 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.378666 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:45Z","lastTransitionTime":"2025-11-24T17:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.466321 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:45 crc kubenswrapper[4760]: E1124 17:04:45.466508 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.482727 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.482804 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.482826 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.482886 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.482910 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:45Z","lastTransitionTime":"2025-11-24T17:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.484445 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.502797 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.518101 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f9392f9-adab-4822-a67d-5d2dfb53d4d0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33b318e984a01d81c7f58a0baacf4008d87b91864854245c6187122e9666c4f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.537942 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.556788 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.573931 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.587506 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.587556 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.587573 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.587596 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.587617 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:45Z","lastTransitionTime":"2025-11-24T17:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.592283 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.612576 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.634547 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.654457 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.671929 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.689633 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.692668 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.692766 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.692795 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.692826 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.692850 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:45Z","lastTransitionTime":"2025-11-24T17:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.710526 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.733636 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:34Z\\\",\\\"message\\\":\\\"2025-11-24T17:03:48+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to 
/host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331\\\\n2025-11-24T17:03:48+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331 to /host/opt/cni/bin/\\\\n2025-11-24T17:03:48Z [verbose] multus-daemon started\\\\n2025-11-24T17:03:48Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:04:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.760197 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.793176 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.485527 6513 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.485785 6513 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:04:22.486040 6513 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486064 6513 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.486116 6513 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486527 6513 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 17:04:22.486551 6513 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 17:04:22.486589 6513 factory.go:656] Stopping watch factory\\\\nI1124 17:04:22.486608 6513 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:04:22.486639 6513 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.796424 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.796538 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.796577 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.796622 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.796669 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:45Z","lastTransitionTime":"2025-11-24T17:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.821998 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.842320 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:45Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.900443 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.900515 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.900533 4760 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.900560 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:45 crc kubenswrapper[4760]: I1124 17:04:45.900582 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:45Z","lastTransitionTime":"2025-11-24T17:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
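
The "Node became not ready" record above is the kubelet flipping the node's Ready condition to False with reason KubeletNotReady, and the condition it prints maps directly onto the upstream v1 NodeCondition type. A minimal Go sketch of that object follows; the helper name notReadyCondition and the use of time.Now() are illustrative assumptions, not kubelet code:

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// notReadyCondition (hypothetical helper) builds the same condition the
// kubelet keeps logging above via setters.go:603.
func notReadyCondition(now time.Time) corev1.NodeCondition {
	return corev1.NodeCondition{
		Type:               corev1.NodeReady,
		Status:             corev1.ConditionFalse,
		LastHeartbeatTime:  metav1.NewTime(now),
		LastTransitionTime: metav1.NewTime(now),
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
}

func main() {
	fmt.Printf("%+v\n", notReadyCondition(time.Now()))
}
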
Nov 24 17:04:46 crc kubenswrapper[4760]: I1124 17:04:46.466195 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:46 crc kubenswrapper[4760]: I1124 17:04:46.466227 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:46 crc kubenswrapper[4760]: I1124 17:04:46.466241 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:46 crc kubenswrapper[4760]: E1124 17:04:46.466363 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:46 crc kubenswrapper[4760]: E1124 17:04:46.466493 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:46 crc kubenswrapper[4760]: E1124 17:04:46.466631 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
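
Each repetition of the message reduces to one filesystem check: does /etc/kubernetes/cni/net.d/ contain a network configuration yet? A standard-library Go sketch of that check; hasCNIConfig and the accepted extensions are assumptions for illustration (the real lookup lives in the CNI/ovn-kubernetes code, not the kubelet):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether dir holds at least one file with a
// conventional CNI config extension.
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	if !ok {
		// The state the kubelet keeps reporting in the records above.
		fmt.Println("no CNI configuration file found; network plugin not ready")
	}
}
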
Nov 24 17:04:47 crc kubenswrapper[4760]: I1124 17:04:47.465717 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:47 crc kubenswrapper[4760]: E1124 17:04:47.465959 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
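
The ovnkube-controller status earlier in this section ("back-off 20s restarting failed container", restartCount 2) reflects the kubelet's crash-loop restart backoff, which doubles per consecutive failure from a small base up to a fixed cap. The 10s base and 5m cap below are assumed constants consistent with the 20s delay seen on the second restart; the function is a sketch, not kubelet source:

package main

import (
	"fmt"
	"time"
)

// restartBackoff returns the assumed crash-loop delay before restart n
// (n = 1 is the first restart after the initial crash).
func restartBackoff(n int) time.Duration {
	const (
		base     = 10 * time.Second
		maxDelay = 5 * time.Minute
	)
	d := base
	for i := 1; i < n; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for n := 1; n <= 7; n++ {
		fmt.Printf("restart %d: wait %s\n", n, restartBackoff(n)) // 10s, 20s, 40s, ... capped at 5m
	}
}
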
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.465681 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.465721 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.466414 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:48 crc kubenswrapper[4760]: E1124 17:04:48.466758 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:48 crc kubenswrapper[4760]: E1124 17:04:48.466932 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:48 crc kubenswrapper[4760]: E1124 17:04:48.466652 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.503735 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.503788 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.503806 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.503833 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.503851 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:48Z","lastTransitionTime":"2025-11-24T17:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.608707 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.608762 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.608782 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.608813 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.608837 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:48Z","lastTransitionTime":"2025-11-24T17:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.712456 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.712519 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.712538 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.712568 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.712698 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:48Z","lastTransitionTime":"2025-11-24T17:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.815954 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.816497 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.816708 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.816937 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.817195 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:48Z","lastTransitionTime":"2025-11-24T17:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.920900 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.920990 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.921058 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.921092 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:48 crc kubenswrapper[4760]: I1124 17:04:48.921114 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:48Z","lastTransitionTime":"2025-11-24T17:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.024406 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.024494 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.024518 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.024551 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.024573 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:49Z","lastTransitionTime":"2025-11-24T17:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.128369 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.128446 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.128468 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.128504 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.128527 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:49Z","lastTransitionTime":"2025-11-24T17:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.233148 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.233209 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.233225 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.233249 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.233266 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:49Z","lastTransitionTime":"2025-11-24T17:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
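Each repetition of the block above is a node-status heartbeat re-recording the same five events and rewriting the Ready condition; setters.go:603 logs the condition it writes. A local stand-in for that condition shape (the struct mirrors the condition={...} JSON in the records, not kubelet source):

package main

import (
	"fmt"
	"time"
)

// NodeCondition is a local stand-in for the corev1.NodeCondition shape
// serialized in the setters.go:603 records above.
type NodeCondition struct {
	Type               string
	Status             string
	LastHeartbeatTime  time.Time
	LastTransitionTime time.Time
	Reason             string
	Message            string
}

// notReadyCondition builds the Ready=False condition the kubelet keeps
// re-recording while the runtime network is down (a sketch, with the
// message text taken from the log).
func notReadyCondition(now time.Time, msg string) NodeCondition {
	return NodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message:            msg,
	}
}

func main() {
	c := notReadyCondition(time.Now(), "container runtime network not ready: NetworkReady=false")
	fmt.Printf("%+v\n", c)
}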
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.336201 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.336311 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.336332 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.336359 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.336378 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:49Z","lastTransitionTime":"2025-11-24T17:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.439516 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.439613 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.439632 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.439656 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.439674 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:49Z","lastTransitionTime":"2025-11-24T17:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.466304 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:04:49 crc kubenswrapper[4760]: E1124 17:04:49.466497 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.542988 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.543130 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.543150 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.543177 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.543197 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:49Z","lastTransitionTime":"2025-11-24T17:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.647335 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.647398 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.647416 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.647443 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.647461 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:49Z","lastTransitionTime":"2025-11-24T17:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.751375 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.751434 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.751454 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.751479 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.751497 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:49Z","lastTransitionTime":"2025-11-24T17:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.855783 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.855838 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.855859 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.855897 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.855916 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:49Z","lastTransitionTime":"2025-11-24T17:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.959313 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.959393 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.959412 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.959437 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:49 crc kubenswrapper[4760]: I1124 17:04:49.959455 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:49Z","lastTransitionTime":"2025-11-24T17:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.061993 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.062097 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.062115 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.062139 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.062156 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:50Z","lastTransitionTime":"2025-11-24T17:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.165721 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.165802 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.165832 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.165869 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.165895 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:50Z","lastTransitionTime":"2025-11-24T17:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.185188 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.185413 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.185451 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.185471 4760 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.185807 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.185748547 +0000 UTC m=+149.508630127 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.269388 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.269442 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.269455 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.269474 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.269488 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:50Z","lastTransitionTime":"2025-11-24T17:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
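kube-api-access-s2dwl above is a projected volume: it bundles a service-account token with the kube-root-ca.crt and openshift-service-ca.crt ConfigMaps, and mounting fails while any of those objects is missing from the kubelet's local object cache, which is what "not registered" means in the projected.go errors. A sketch of that all-or-nothing source check (objectCache is a hypothetical stand-in for the kubelet's pod-scoped cache):

package main

import (
	"fmt"
)

// objectCache maps "namespace/name" to whether the kubelet has
// registered that API object for a pod's volumes.
type objectCache map[string]bool

// prepareProjected fails if any projected source is not yet registered,
// reporting all missing objects at once, as projected.go:194 does above.
func prepareProjected(cache objectCache, namespace string, sources []string) error {
	var missing []string
	for _, name := range sources {
		key := namespace + "/" + name
		if !cache[key] {
			missing = append(missing, fmt.Sprintf("object %q not registered", key))
		}
	}
	if len(missing) > 0 {
		return fmt.Errorf("error preparing data for projected volume: %v", missing)
	}
	return nil
}

func main() {
	cache := objectCache{} // nothing registered yet, as in the log
	err := prepareProjected(cache, "openshift-network-diagnostics",
		[]string{"kube-root-ca.crt", "openshift-service-ca.crt"})
	fmt.Println(err)
}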
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.286258 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.286513 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.286477483 +0000 UTC m=+149.609359053 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.286643 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.286772 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.286856 4760 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.286972 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.286994 4760 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.287040 4760 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.287249 4760 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.287287 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.287275926 +0000 UTC m=+149.610157486 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
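The TearDown failure above is different in kind: the volume plugin type exists, but the kubevirt.io.hostpath-provisioner CSI driver has not re-registered over the kubelet's plugin-registration socket since the restart, so the name lookup fails. A sketch of that registry lookup (shape assumed for illustration, not the kubelet's csi_plugin.go):

package main

import (
	"fmt"
	"sync"
)

// csiRegistry stands in for the kubelet-side list of CSI drivers that
// have announced themselves over the plugin-registration socket.
type csiRegistry struct {
	mu      sync.RWMutex
	drivers map[string]string // driver name -> plugin socket path
}

// client looks up a driver by name; the error text mirrors the
// Unmounter.TearDownAt failure in the record above.
func (r *csiRegistry) client(name string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	sock, ok := r.drivers[name]
	if !ok {
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return sock, nil
}

func main() {
	r := &csiRegistry{drivers: map[string]string{}} // empty after restart
	_, err := r.client("kubevirt.io.hostpath-provisioner")
	fmt.Println(err)
}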
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.287313 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.287430 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.287334507 +0000 UTC m=+149.610216097 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.287526 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.287496842 +0000 UTC m=+149.610378432 (durationBeforeRetry 1m4s).
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.373405 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.373473 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.373490 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.373511 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.373528 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:50Z","lastTransitionTime":"2025-11-24T17:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.465727 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.465787 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.466289 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.466546 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.466748 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:04:50 crc kubenswrapper[4760]: E1124 17:04:50.466922 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9"
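Note the retry stamps in the nestedpendingoperations records above: each failed volume operation is re-queued with exponentially growing delay, and at this point the delay has reached 1m4s (durationBeforeRetry), pushing the next attempt out to 17:05:54. A sketch of that doubling, assuming a 500ms initial delay and a roughly two-minute cap in the spirit of the kubelet's defaults:

package main

import (
	"fmt"
	"time"
)

// backoff doubles the retry delay per failure up to a cap; the initial
// delay and cap are assumptions, not values read from this log.
func backoff(initial, max time.Duration, failures int) time.Duration {
	d := initial
	for i := 0; i < failures; i++ {
		d *= 2
		if d > max {
			return max
		}
	}
	return d
}

func main() {
	for n := 0; n <= 8; n++ {
		fmt.Printf("failure %d -> retry in %s\n", n, backoff(500*time.Millisecond, 122*time.Second, n))
	}
}

Under those assumptions 1m4s is 500ms doubled seven times, consistent with these mounts having failed repeatedly since early in the boot.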
pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.466948 4760 scope.go:117] "RemoveContainer" containerID="1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a" Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.475750 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.475797 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.475816 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.475839 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.475860 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:50Z","lastTransitionTime":"2025-11-24T17:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.578813 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.579300 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.579325 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.579358 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.579379 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:50Z","lastTransitionTime":"2025-11-24T17:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.683795 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.683852 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.683870 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.683895 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.683912 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:50Z","lastTransitionTime":"2025-11-24T17:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.786917 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.786977 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.786996 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.787070 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.787097 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:50Z","lastTransitionTime":"2025-11-24T17:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.890404 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.890472 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.890489 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.890520 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.890545 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:50Z","lastTransitionTime":"2025-11-24T17:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
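The NotReady churn is interrupted just below by a PLEG record: the pod lifecycle event generator relists runtime state and reports ovnkube-node's container starting, the first sign the CNI provider is coming up. A stand-in for that event shape (field names follow the event={...} JSON in the record; the handler is illustrative):

package main

import "fmt"

// PodLifecycleEvent mirrors the {ID, Type, Data} shape seen in the
// "SyncLoop (PLEG): event for pod" record below.
type PodLifecycleEvent struct {
	ID   string      // pod UID
	Type string      // e.g. "ContainerStarted", "ContainerDied"
	Data interface{} // for ContainerStarted: the container ID
}

// handle shows how such an event would trigger a pod sync.
func handle(ev PodLifecycleEvent) {
	switch ev.Type {
	case "ContainerStarted":
		fmt.Printf("SyncLoop (PLEG): pod %s container %v started; trigger pod sync\n", ev.ID, ev.Data)
	case "ContainerDied":
		fmt.Printf("SyncLoop (PLEG): pod %s container %v died; restart per policy\n", ev.ID, ev.Data)
	}
}

func main() {
	handle(PodLifecycleEvent{
		ID:   "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b",
		Type: "ContainerStarted",
		Data: "2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea",
	})
}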
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.994786 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.994859 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.994889 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.994922 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:50 crc kubenswrapper[4760]: I1124 17:04:50.994945 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:50Z","lastTransitionTime":"2025-11-24T17:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.068546 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/2.log"
Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.072289 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea"}
Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.072947 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2"
Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.097816 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.097861 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.097870 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.097886 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.097895 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:51Z","lastTransitionTime":"2025-11-24T17:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.098874 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.114311 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.126839 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.140967 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f9392f9-adab-4822-a67d-5d2dfb53d4d0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33b318e984a01d81c7f58a0baacf4008d87b91864854245c6187122e9666c4f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.156844 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.169760 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.185488 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.196478 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.200642 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.200697 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.200710 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.200729 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.200743 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:51Z","lastTransitionTime":"2025-11-24T17:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.217827 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.239901 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.252441 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.270191 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.283529 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0
b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.297974 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.307895 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.307951 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.307964 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.307983 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.307995 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:51Z","lastTransitionTime":"2025-11-24T17:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.326196 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:34Z\\\",\\\"message\\\":\\\"2025-11-24T17:03:48+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331\\\\n2025-11-24T17:03:48+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331 to /host/opt/cni/bin/\\\\n2025-11-24T17:03:48Z [verbose] multus-daemon started\\\\n2025-11-24T17:03:48Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:04:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.354183 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.385714 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.485527 6513 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.485785 6513 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:04:22.486040 6513 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486064 6513 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.486116 6513 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486527 6513 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 17:04:22.486551 6513 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 17:04:22.486589 6513 factory.go:656] Stopping watch factory\\\\nI1124 17:04:22.486608 6513 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:04:22.486639 6513 handler.go:208] Removed *v1.Pod event handler 
6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\
\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.399246 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.410330 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.410375 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.410388 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.410404 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.410418 4760 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:51Z","lastTransitionTime":"2025-11-24T17:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.465556 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:51 crc kubenswrapper[4760]: E1124 17:04:51.465869 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.514105 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.514139 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.514152 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.514168 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.514177 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:51Z","lastTransitionTime":"2025-11-24T17:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.617394 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.617455 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.617473 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.617501 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.617519 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:51Z","lastTransitionTime":"2025-11-24T17:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.720057 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.720113 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.720131 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.720156 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.720176 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:51Z","lastTransitionTime":"2025-11-24T17:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.823101 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.823501 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.823664 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.823808 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.823938 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:51Z","lastTransitionTime":"2025-11-24T17:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.927534 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.927906 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.928093 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.928271 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:51 crc kubenswrapper[4760]: I1124 17:04:51.928402 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:51Z","lastTransitionTime":"2025-11-24T17:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.031215 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.031274 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.031293 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.031316 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.031333 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:52Z","lastTransitionTime":"2025-11-24T17:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.080348 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/3.log" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.081644 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/2.log" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.085923 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.085990 4760 scope.go:117] "RemoveContainer" containerID="1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.085787 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" exitCode=1 Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.087116 4760 scope.go:117] "RemoveContainer" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:04:52 crc kubenswrapper[4760]: E1124 17:04:52.087732 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.109781 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.143372 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.143805 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.143818 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.143839 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.143572 4760 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.143854 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:52Z","lastTransitionTime":"2025-11-24T17:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.167132 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:34Z\\\",\\\"message\\\":\\\"2025-11-24T17:03:48+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331\\\\n2025-11-24T17:03:48+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331 to /host/opt/cni/bin/\\\\n2025-11-24T17:03:48Z [verbose] multus-daemon started\\\\n2025-11-24T17:03:48Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:04:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.190071 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.218372 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1ed796cf6e1b7e4a7f6e59093dfe2212a17609113ad442245a6bbfd896550d1a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:22Z\\\",\\\"message\\\":\\\"reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.485527 6513 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.485785 6513 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 17:04:22.486040 6513 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486064 6513 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 17:04:22.486116 6513 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 17:04:22.486527 6513 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 17:04:22.486551 6513 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 17:04:22.486589 6513 factory.go:656] Stopping watch factory\\\\nI1124 17:04:22.486608 6513 ovnkube.go:599] Stopped ovnkube\\\\nI1124 17:04:22.486639 6513 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:51Z\\\",\\\"message\\\":\\\"rc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z]\\\\nI1124 17:04:51.500188 6871 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592
d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.234547 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f9392f9-adab-4822-a67d-5d2dfb53d4d0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33b318e984a01d81c7f58a0baacf4008d87b91864854245c6187122e9666c4f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.247196 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.247255 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.247272 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.247296 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.247315 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:52Z","lastTransitionTime":"2025-11-24T17:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.251616 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.268780 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.284245 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.302247 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.324484 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 
2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.345692 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.349888 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.349929 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.349944 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.349965 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.349979 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:52Z","lastTransitionTime":"2025-11-24T17:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.365039 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.380714 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.400070 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.419860 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.437255 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.452687 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.452778 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.452803 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.452837 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.452861 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:52Z","lastTransitionTime":"2025-11-24T17:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.460137 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:52Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.465530 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:52 crc kubenswrapper[4760]: E1124 17:04:52.465725 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.465995 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:52 crc kubenswrapper[4760]: E1124 17:04:52.466131 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.466236 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:52 crc kubenswrapper[4760]: E1124 17:04:52.466350 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.556906 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.557094 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.557125 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.557156 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.557174 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:52Z","lastTransitionTime":"2025-11-24T17:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.659994 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.660100 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.660128 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.660160 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.660185 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:52Z","lastTransitionTime":"2025-11-24T17:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.764033 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.764095 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.764112 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.764140 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.764160 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:52Z","lastTransitionTime":"2025-11-24T17:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.867238 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.867299 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.867316 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.867339 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.867357 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:52Z","lastTransitionTime":"2025-11-24T17:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.970630 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.970704 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.970721 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.970746 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:52 crc kubenswrapper[4760]: I1124 17:04:52.970765 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:52Z","lastTransitionTime":"2025-11-24T17:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.073896 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.073955 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.073975 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.074025 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.074044 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:53Z","lastTransitionTime":"2025-11-24T17:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.093552 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/3.log" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.098829 4760 scope.go:117] "RemoveContainer" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:04:53 crc kubenswrapper[4760]: E1124 17:04:53.099108 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.117940 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f9392f9-adab-4822-a67d-5d2dfb53d4d0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33b318e984a01d81c7f58a0baacf4008d87b91864854245c6187122e9666c4f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"exitCode\\\":0,\
\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.141365 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.157947 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.174630 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.178575 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.178622 
4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.178639 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.178663 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.178679 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:53Z","lastTransitionTime":"2025-11-24T17:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.190788 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.209359 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"res
ource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.230183 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.250757 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.270382 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.282472 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.282520 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.282536 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.282561 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.282582 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:53Z","lastTransitionTime":"2025-11-24T17:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.289475 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.309698 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"sta
rtedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.327955 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.346085 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.380334 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:51Z\\\",\\\"message\\\":\\\"rc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z]\\\\nI1124 17:04:51.500188 6871 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.385648 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.385705 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.385722 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.385747 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.385796 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:53Z","lastTransitionTime":"2025-11-24T17:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.404936 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.425969 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.447538 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:34Z\\\",\\\"message\\\":\\\"2025-11-24T17:03:48+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331\\\\n2025-11-24T17:03:48+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331 to /host/opt/cni/bin/\\\\n2025-11-24T17:03:48Z [verbose] multus-daemon started\\\\n2025-11-24T17:03:48Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:04:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.466444 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:53 crc kubenswrapper[4760]: E1124 17:04:53.466652 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
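The records above show the second failure mode running through this log: kubelet keeps the node NotReady because no CNI configuration file exists in /etc/kubernetes/cni/net.d/, while multus separately times out waiting for the readiness indicator file 10-ovn-kubernetes.conf that the crash-looping ovnkube-controller should write. The shape of that readiness check can be reproduced with a minimal standalone sketch like the one below, which polls the conf directory and repeats the same complaint until a config appears. The file name cnicheck.go, the glob patterns, and the 2s/40s poll settings are illustrative assumptions, not the actual kubelet or multus implementation.

// cnicheck.go: minimal sketch of a CNI-config readiness poll (assumed
// parameters; not the real kubelet code). It reports ready as soon as any
// network config file shows up in the directory named in the log records.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// hasCNIConf reports whether dir contains any CNI network configuration.
// The extensions mirror what CNI config loaders conventionally accept.
func hasCNIConf(dir string) bool {
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		if matches, _ := filepath.Glob(filepath.Join(dir, pat)); len(matches) > 0 {
			return true
		}
	}
	return false
}

func main() {
	// Directory taken from the log records above; the poll interval and
	// overall timeout are illustrative assumptions.
	dir := "/etc/kubernetes/cni/net.d"
	deadline := time.Now().Add(40 * time.Second)
	for time.Now().Before(deadline) {
		if hasCNIConf(dir) {
			fmt.Println("NetworkReady=true")
			return
		}
		fmt.Fprintf(os.Stderr, "no CNI configuration file in %s. Has your network provider started?\n", dir)
		time.Sleep(2 * time.Second)
	}
	fmt.Fprintln(os.Stderr, "timed out waiting for the condition")
	os.Exit(1)
}

On this node the sketch would print the same complaint on every tick until ovnkube-controller stays up long enough to write its config.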
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.474293 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerI
D\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":
[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:53Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 
17:04:53.488583 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.488638 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.488684 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.488707 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.488725 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:53Z","lastTransitionTime":"2025-11-24T17:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.592112 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.592194 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.592213 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.592240 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.592263 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:53Z","lastTransitionTime":"2025-11-24T17:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.695137 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.695493 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.695641 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.695830 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.695966 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:53Z","lastTransitionTime":"2025-11-24T17:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.798712 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.799157 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.799343 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.799524 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.799750 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:53Z","lastTransitionTime":"2025-11-24T17:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.903343 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.903397 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.903416 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.903439 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:53 crc kubenswrapper[4760]: I1124 17:04:53.903456 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:53Z","lastTransitionTime":"2025-11-24T17:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.006240 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.006299 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.006317 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.006345 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.006367 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.030686 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.030771 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.030792 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.030825 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.030849 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:04:54 crc kubenswrapper[4760]: E1124 17:04:54.052716 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.057776 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.057837 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.057854 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.057878 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.057895 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: E1124 17:04:54.080081 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.085565 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.085626 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.085646 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.085674 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.085693 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: E1124 17:04:54.106933 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.112672 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.112739 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.112756 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.112778 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.112795 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: E1124 17:04:54.133945 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.139673 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.139730 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.139750 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.139780 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.139806 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: E1124 17:04:54.161252 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:54Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:54 crc kubenswrapper[4760]: E1124 17:04:54.161542 4760 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.163895 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.163959 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.163985 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.164048 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.164075 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.266725 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.266780 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.266800 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.266829 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.266855 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.369248 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.369312 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.369336 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.369363 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.369379 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.466280 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.466319 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.466947 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:54 crc kubenswrapper[4760]: E1124 17:04:54.467252 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:54 crc kubenswrapper[4760]: E1124 17:04:54.467159 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:54 crc kubenswrapper[4760]: E1124 17:04:54.467729 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.472582 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.472643 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.472670 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.472704 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.472725 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.576763 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.577293 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.577331 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.577360 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.577383 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.680524 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.680573 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.680590 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.680617 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.680638 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.783501 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.783547 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.783562 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.783634 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.783664 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.886748 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.886803 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.886814 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.886835 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.886847 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.990191 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.990261 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.990278 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.990316 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:54 crc kubenswrapper[4760]: I1124 17:04:54.990338 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:54Z","lastTransitionTime":"2025-11-24T17:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.093057 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.093116 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.093133 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.093159 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.093178 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:55Z","lastTransitionTime":"2025-11-24T17:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.196142 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.196203 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.196223 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.196248 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.196295 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:55Z","lastTransitionTime":"2025-11-24T17:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.300065 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.300144 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.300168 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.300201 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.300224 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:55Z","lastTransitionTime":"2025-11-24T17:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.403662 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.403712 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.403732 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.403764 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.403786 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:55Z","lastTransitionTime":"2025-11-24T17:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.466411 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:55 crc kubenswrapper[4760]: E1124 17:04:55.466592 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.483085 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.501479 4760 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.507623 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.507680 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.507701 4760 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.507734 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.507756 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:55Z","lastTransitionTime":"2025-11-24T17:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.517584 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f9392f9-adab-4822-a67d-5d2dfb53d4d0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33b318e984a01d81c7f58a0baacf4008d87b91864854245c6187122e9666c4f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.540363 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.560821 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.581423 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.601045 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.610679 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.610746 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.610765 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.610795 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.610815 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:55Z","lastTransitionTime":"2025-11-24T17:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.621259 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.641320 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.660790 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.677953 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.695430 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.714348 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.714391 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.714410 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.714436 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.714453 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:55Z","lastTransitionTime":"2025-11-24T17:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.715037 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.736570 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:34Z\\\",\\\"message\\\":\\\"2025-11-24T17:03:48+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to 
/host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331\\\\n2025-11-24T17:03:48+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331 to /host/opt/cni/bin/\\\\n2025-11-24T17:03:48Z [verbose] multus-daemon started\\\\n2025-11-24T17:03:48Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:04:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.759687 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.791121 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:51Z\\\",\\\"message\\\":\\\"rc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z]\\\\nI1124 17:04:51.500188 6871 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.812061 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.822062 4760 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.822129 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.822154 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.822186 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.822210 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:55Z","lastTransitionTime":"2025-11-24T17:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.834469 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\
\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:55Z is after 2025-08-24T17:21:41Z" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.925831 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.925886 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.925904 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.925929 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:55 crc kubenswrapper[4760]: I1124 17:04:55.925949 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:55Z","lastTransitionTime":"2025-11-24T17:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.028496 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.028567 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.028587 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.028616 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.028635 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:56Z","lastTransitionTime":"2025-11-24T17:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.131944 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.132046 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.132065 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.132093 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.132114 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:56Z","lastTransitionTime":"2025-11-24T17:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.235356 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.235485 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.235560 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.235594 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.235660 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:56Z","lastTransitionTime":"2025-11-24T17:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.339293 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.339370 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.339389 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.339417 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.339436 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:56Z","lastTransitionTime":"2025-11-24T17:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.443664 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.443742 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.443763 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.443794 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.443817 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:56Z","lastTransitionTime":"2025-11-24T17:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.465648 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:56 crc kubenswrapper[4760]: E1124 17:04:56.465774 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.465941 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:56 crc kubenswrapper[4760]: E1124 17:04:56.465985 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.466106 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:56 crc kubenswrapper[4760]: E1124 17:04:56.466155 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.546768 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.546811 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.546825 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.546840 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.546851 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:56Z","lastTransitionTime":"2025-11-24T17:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.649909 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.649977 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.649995 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.650045 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.650065 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:56Z","lastTransitionTime":"2025-11-24T17:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.754149 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.754205 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.754223 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.754250 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.754269 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:56Z","lastTransitionTime":"2025-11-24T17:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.857923 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.858059 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.858081 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.858107 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.858163 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:56Z","lastTransitionTime":"2025-11-24T17:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.961947 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.962070 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.962089 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.962145 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:56 crc kubenswrapper[4760]: I1124 17:04:56.962167 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:56Z","lastTransitionTime":"2025-11-24T17:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.065236 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.065297 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.065315 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.065339 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.065357 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:57Z","lastTransitionTime":"2025-11-24T17:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.168230 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.168299 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.168326 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.168352 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.168369 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:57Z","lastTransitionTime":"2025-11-24T17:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.271743 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.272218 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.272384 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.272531 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.272669 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:57Z","lastTransitionTime":"2025-11-24T17:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.375634 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.375691 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.375708 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.375731 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.375749 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:57Z","lastTransitionTime":"2025-11-24T17:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.465901 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:57 crc kubenswrapper[4760]: E1124 17:04:57.466140 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.478222 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.478304 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.478324 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.478346 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.478365 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:57Z","lastTransitionTime":"2025-11-24T17:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.581789 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.581855 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.581875 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.581903 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.581923 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:57Z","lastTransitionTime":"2025-11-24T17:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.685733 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.685794 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.685812 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.685841 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.685859 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:57Z","lastTransitionTime":"2025-11-24T17:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.788500 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.788558 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.788575 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.788598 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.788616 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:57Z","lastTransitionTime":"2025-11-24T17:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.891625 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.891682 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.891699 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.891722 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.891739 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:57Z","lastTransitionTime":"2025-11-24T17:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.994946 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.995047 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.995067 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.995092 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:57 crc kubenswrapper[4760]: I1124 17:04:57.995110 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:57Z","lastTransitionTime":"2025-11-24T17:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.097590 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.097652 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.097669 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.097692 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.097710 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:58Z","lastTransitionTime":"2025-11-24T17:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.201088 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.201241 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.201278 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.201345 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.201371 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:58Z","lastTransitionTime":"2025-11-24T17:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.304272 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.304322 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.304340 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.304364 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.304381 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:58Z","lastTransitionTime":"2025-11-24T17:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.413717 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.413776 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.413791 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.413810 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.413826 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:58Z","lastTransitionTime":"2025-11-24T17:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.466354 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.466606 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.466790 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:04:58 crc kubenswrapper[4760]: E1124 17:04:58.467118 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:04:58 crc kubenswrapper[4760]: E1124 17:04:58.467535 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:04:58 crc kubenswrapper[4760]: E1124 17:04:58.467641 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.516595 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.516642 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.516680 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.516701 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.516712 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:58Z","lastTransitionTime":"2025-11-24T17:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.619057 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.619094 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.619105 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.619123 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.619135 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:58Z","lastTransitionTime":"2025-11-24T17:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.721986 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.722125 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.722147 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.722175 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.722193 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:58Z","lastTransitionTime":"2025-11-24T17:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.825406 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.825456 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.825474 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.825522 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.825540 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:58Z","lastTransitionTime":"2025-11-24T17:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.929508 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.929589 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.929613 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.929644 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:58 crc kubenswrapper[4760]: I1124 17:04:58.929666 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:58Z","lastTransitionTime":"2025-11-24T17:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.033499 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.033572 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.033596 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.033629 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.033651 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:59Z","lastTransitionTime":"2025-11-24T17:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.136096 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.136253 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.136274 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.136330 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.136350 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:59Z","lastTransitionTime":"2025-11-24T17:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.238923 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.238964 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.238973 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.238989 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.239017 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:59Z","lastTransitionTime":"2025-11-24T17:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.342393 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.342479 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.342500 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.342532 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.342556 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:59Z","lastTransitionTime":"2025-11-24T17:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.446123 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.446198 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.446227 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.446259 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.446280 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:59Z","lastTransitionTime":"2025-11-24T17:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.465708 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:04:59 crc kubenswrapper[4760]: E1124 17:04:59.465899 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.549818 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.549885 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.549901 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.549926 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.549946 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:59Z","lastTransitionTime":"2025-11-24T17:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.653683 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.653746 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.653763 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.653788 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.653808 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:59Z","lastTransitionTime":"2025-11-24T17:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.757182 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.757241 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.757259 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.757286 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.757303 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:59Z","lastTransitionTime":"2025-11-24T17:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.859930 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.859999 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.860043 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.860068 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.860085 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:59Z","lastTransitionTime":"2025-11-24T17:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.964083 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.964148 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.964166 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.964192 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:04:59 crc kubenswrapper[4760]: I1124 17:04:59.964210 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:04:59Z","lastTransitionTime":"2025-11-24T17:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.067583 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.067735 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.067756 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.067817 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.067837 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:00Z","lastTransitionTime":"2025-11-24T17:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.171519 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.171591 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.171609 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.171638 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.171662 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:00Z","lastTransitionTime":"2025-11-24T17:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.274900 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.274965 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.274983 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.275074 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.275108 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:00Z","lastTransitionTime":"2025-11-24T17:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.378377 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.378439 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.378456 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.378481 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.378500 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:00Z","lastTransitionTime":"2025-11-24T17:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.466095 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.466111 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:00 crc kubenswrapper[4760]: E1124 17:05:00.466304 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.466121 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:00 crc kubenswrapper[4760]: E1124 17:05:00.466459 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:00 crc kubenswrapper[4760]: E1124 17:05:00.466597 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.482047 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.482110 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.482129 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.482153 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.482174 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:00Z","lastTransitionTime":"2025-11-24T17:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.585870 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.585928 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.585946 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.585971 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.585990 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:00Z","lastTransitionTime":"2025-11-24T17:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.689491 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.689564 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.689590 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.689619 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.689641 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:00Z","lastTransitionTime":"2025-11-24T17:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.793091 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.793185 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.793244 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.793271 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.793325 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:00Z","lastTransitionTime":"2025-11-24T17:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.896178 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.896244 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.896262 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.896290 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.896314 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:00Z","lastTransitionTime":"2025-11-24T17:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.999096 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.999164 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.999180 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.999206 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:00 crc kubenswrapper[4760]: I1124 17:05:00.999225 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:00Z","lastTransitionTime":"2025-11-24T17:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.102271 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.102399 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.102420 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.102445 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.102464 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:01Z","lastTransitionTime":"2025-11-24T17:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.206157 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.206222 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.206243 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.206271 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.206293 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:01Z","lastTransitionTime":"2025-11-24T17:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.310037 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.310096 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.310112 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.310136 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.310154 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:01Z","lastTransitionTime":"2025-11-24T17:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.413678 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.413744 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.413760 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.413785 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.413802 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:01Z","lastTransitionTime":"2025-11-24T17:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.466147 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:01 crc kubenswrapper[4760]: E1124 17:05:01.466331 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.517355 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.517415 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.517432 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.517456 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.517475 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:01Z","lastTransitionTime":"2025-11-24T17:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.620951 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.621074 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.621107 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.621138 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.621161 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:01Z","lastTransitionTime":"2025-11-24T17:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.723602 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.723655 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.723671 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.723696 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.723713 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:01Z","lastTransitionTime":"2025-11-24T17:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.826734 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.826790 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.826810 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.826832 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.826882 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:01Z","lastTransitionTime":"2025-11-24T17:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.929426 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.929489 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.929512 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.929544 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:01 crc kubenswrapper[4760]: I1124 17:05:01.929564 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:01Z","lastTransitionTime":"2025-11-24T17:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.032937 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.033065 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.033088 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.033114 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.033132 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:02Z","lastTransitionTime":"2025-11-24T17:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.136143 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.136220 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.136244 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.136275 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.136293 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:02Z","lastTransitionTime":"2025-11-24T17:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.239205 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.239277 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.239295 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.239323 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.239342 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:02Z","lastTransitionTime":"2025-11-24T17:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.352338 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.352413 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.352435 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.352671 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.352700 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:02Z","lastTransitionTime":"2025-11-24T17:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.456296 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.456356 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.456396 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.456428 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.456451 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:02Z","lastTransitionTime":"2025-11-24T17:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.465982 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.466391 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.466480 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:02 crc kubenswrapper[4760]: E1124 17:05:02.466324 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:02 crc kubenswrapper[4760]: E1124 17:05:02.467066 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:02 crc kubenswrapper[4760]: E1124 17:05:02.467142 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.560488 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.560548 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.560565 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.560604 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.560629 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:02Z","lastTransitionTime":"2025-11-24T17:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.663419 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.663477 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.663496 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.663520 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.663539 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:02Z","lastTransitionTime":"2025-11-24T17:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.767312 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.767373 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.767389 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.767414 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.767431 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:02Z","lastTransitionTime":"2025-11-24T17:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.870466 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.870511 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.870524 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.870542 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.870554 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:02Z","lastTransitionTime":"2025-11-24T17:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.973676 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.973739 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.973757 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.973782 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:02 crc kubenswrapper[4760]: I1124 17:05:02.973800 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:02Z","lastTransitionTime":"2025-11-24T17:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.077566 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.077627 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.077644 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.077670 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.077695 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:03Z","lastTransitionTime":"2025-11-24T17:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.181431 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.181555 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.181574 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.181601 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.181622 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:03Z","lastTransitionTime":"2025-11-24T17:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.285043 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.285112 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.285137 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.285169 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.285197 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:03Z","lastTransitionTime":"2025-11-24T17:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.388704 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.388781 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.388801 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.388827 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.388846 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:03Z","lastTransitionTime":"2025-11-24T17:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.466166 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:03 crc kubenswrapper[4760]: E1124 17:05:03.466363 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.491835 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.491900 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.491922 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.491948 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.491971 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:03Z","lastTransitionTime":"2025-11-24T17:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.595069 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.595131 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.595149 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.595177 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.595195 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:03Z","lastTransitionTime":"2025-11-24T17:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.698139 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.698215 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.698234 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.698260 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.698277 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:03Z","lastTransitionTime":"2025-11-24T17:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.801369 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.801432 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.801450 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.801479 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.801497 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:03Z","lastTransitionTime":"2025-11-24T17:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.904080 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.904117 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.904126 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.904140 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:03 crc kubenswrapper[4760]: I1124 17:05:03.904151 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:03Z","lastTransitionTime":"2025-11-24T17:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.007580 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.007658 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.007682 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.007708 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.007730 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.110474 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.110530 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.110554 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.110582 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.110601 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.213335 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.213395 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.213437 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.213462 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.213480 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.316804 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.316868 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.316887 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.316918 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.316939 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.416875 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.416940 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.416956 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.416982 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.417000 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.437904 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:04Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.443864 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.443930 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.443951 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.443980 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.444042 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.464035 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T17:05:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c065c256-59f6-47bf-8461-0f224e5ef7ad\\\",\\\"systemUUID\\\":\\\"17737b2a-2300-48a8-a1cc-45163d19bbaa\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:04Z is after 2025-08-24T17:21:41Z"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.466389 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.466571 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.466460 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.466796 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.466987 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9"
Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.467447 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.470425 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.470491 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.470509 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.470537 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.470554 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.490802 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{... status patch payload identical to the first attempt above; elided ...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:04Z is after 2025-08-24T17:21:41Z"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.497116 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.497209 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.497227 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.497258 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.497276 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.522172 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{... status patch payload identical to the first attempt above; elided ...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:04Z is after 2025-08-24T17:21:41Z"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.527640 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.527715 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.527734 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.527762 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.527781 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.548414 4760 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{... status patch payload identical to the first attempt above; elided ...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:04Z is after 2025-08-24T17:21:41Z"
Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.548759 4760 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.551573 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
event="NodeHasSufficientMemory" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.551662 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.551686 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.551757 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.551783 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.655757 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.655865 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.655889 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.655915 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.655934 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.758750 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.758793 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.758805 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.758821 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.758833 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.781266 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.781396 4760 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 17:05:04 crc kubenswrapper[4760]: E1124 17:05:04.781471 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs podName:e462626d-5645-4be7-89b4-383a4cde08f9 nodeName:}" failed. No retries permitted until 2025-11-24 17:06:08.781453754 +0000 UTC m=+164.104335304 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs") pod "network-metrics-daemon-dz6vg" (UID: "e462626d-5645-4be7-89b4-383a4cde08f9") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.862292 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.862361 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.862378 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.862404 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.862437 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.965312 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.965385 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.965403 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.965431 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:04 crc kubenswrapper[4760]: I1124 17:05:04.965454 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:04Z","lastTransitionTime":"2025-11-24T17:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.069778 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.069870 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.069890 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.069917 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.069935 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:05Z","lastTransitionTime":"2025-11-24T17:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.173389 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.173492 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.173511 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.173575 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.173596 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:05Z","lastTransitionTime":"2025-11-24T17:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.277401 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.277471 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.277489 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.277516 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.277536 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:05Z","lastTransitionTime":"2025-11-24T17:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.380656 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.380720 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.380738 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.380764 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.380784 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:05Z","lastTransitionTime":"2025-11-24T17:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.465942 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:05:05 crc kubenswrapper[4760]: E1124 17:05:05.466294 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.485187 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.485253 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.485272 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.485299 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.485320 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:05Z","lastTransitionTime":"2025-11-24T17:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.485410 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4f9392f9-adab-4822-a67d-5d2dfb53d4d0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33b318e984a01d81c7f58a0baacf4008d87b91864854245c6187122e9666c4f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"ima
geID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6056b065bf6dfaf56a2c144ff403c4b78b56abd959d481616d67fc2235f8faa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.507512 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.524404 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-49579" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7220d5de-3096-474d-af9b-1276a2e41bd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85180677ce21a7c21cf64ea357b889d825460a45635614badb263f024babaf89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bzjjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-49579\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.541854 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e462626d-5645-4be7-89b4-383a4cde08f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6sw94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:04:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-dz6vg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.560864 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"76f85bc3-8953-42ca-8f48-237dbe9f1c68\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f75d105246ec0ae761ee486def5aa4486d9fcc974ec52edda7a09d88d643248b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d9797edacad19cae2c091de0233e533b1e626425947bd7655e3eb4ade3303c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6d9899ba488c290b597371c814caff16e3ccab04e36480af74d9b90a83355be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbc33b5023f321838637ed402e476f7fbfaef4ddbe1f8f80c509929f3e66d57b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.588215 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.588286 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.588311 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.588342 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.588364 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:05Z","lastTransitionTime":"2025-11-24T17:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.594518 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d91626b7f3e81336a3ecae240f44cc7884a9ea28aca7481851e3a2ae9131a4b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.617001 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.635664 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0046bf400b5fd5932b6f58c392a05d821199cd9b9675484f1b6c1d02ee094feb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.652155 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vx8zv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"98ad2e16-05ae-4094-93ad-d636fdbfecaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe7472b3a68daf4e5d1bf28a62f93eeeac8ef35c37b5f34e8a569f12e40f90cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8pkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vx8zv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.674954 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0634823-fb0a-4849-91a8-ad184c3479d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://273b0b7d4999a73a4ad7a08a6e287af00771e80c403f7507932d43ff54c8c0fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06aa00b391386ffcf9da08cf75c7023b976470ed26df279a2fc1f55812c5712f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://be1996f43db39e34c0d5c79131bc901a1ae4fbce8469cb3bafd7cdebaa2df054\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53ea8dce29acb0affe9290082ef335960c10070279df5a72670f3b3ccc22ebcb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.692080 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.692143 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.692164 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.692194 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.692213 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:05Z","lastTransitionTime":"2025-11-24T17:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.694987 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.713642 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f71fb2ac-0373-4606-a20a-0b60ca26fbc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c2e64709246522a0619679bf6fbf315604ddb90a1c8955a52882f041e5bead3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rwqwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vgbxz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.733331 4760 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bc066f-db82-440f-b301-ae9f092bbdb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4cd9e380d9a909ab0b59df34ba0f571b18951697cd8a8e84b07579f94384a13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a993f08ec5b956c7c3ff86eded533c088355749a6c2d70338df97fa5595fe1fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mxpt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gjlbz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.759511 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55704d49-8081-4cab-a47b-90e354e5c9f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6592ffa6bf7513fb38c987fc16222e60376da35f7d89b421986d242d29ee443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f2dd1f9cf737aac6be24797e2b371c3e83a7ad81a2102188d577803f7ad5fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4caa24a11cc13174ea7ae44ea490f2d1542a000ff14288fa392119be47609e8b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1876382128b4ff91b75bc53b07de7d5b3a7c5295d03c55f24398d4536992c05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://344b07c0668406039d64bf0065e0ed2c1a8512a182a77d433b5a53b6dfaae179\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T17:03:45Z\\\",\\\"message\\\":\\\"file observer\\\\nW1124 17:03:45.190676 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1124 17:03:45.190853 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 17:03:45.192393 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-106667993/tls.crt::/tmp/serving-cert-106667993/tls.key\\\\\\\"\\\\nI1124 17:03:45.504791 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 17:03:45.507873 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 17:03:45.507893 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 17:03:45.507915 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 17:03:45.507920 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 17:03:45.518826 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 17:03:45.518852 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 17:03:45.518879 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518891 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 17:03:45.518904 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 17:03:45.518911 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 17:03:45.518920 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 17:03:45.518928 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 17:03:45.520540 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:39Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://798ce23115c62a9b53981e8b17064666d9eee7e810370c5a9b600bc2f7e1c3d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a0375d8ac78c097aaaadec88b05e994fca9819a20e0f3bca9a937143681cbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.780869 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bca08199e4debc09866698e9cd9daf646f81a2c8e759ebfe4829f139da083a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12ab18585d28045dbeb9ee3495067e39c7bdc8c22d329537166389cb90a32d4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.794950 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.795047 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.795074 4760 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.795105 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.795130 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:05Z","lastTransitionTime":"2025-11-24T17:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.805839 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8x59s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea01e72c-3c1c-465f-a4cb-90eb34c2f871\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:04:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:34Z\\\",\\\"message\\\":\\\"2025-11-24T17:03:48+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331\\\\n2025-11-24T17:03:48+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6d9c3a67-abde-40a0-aa27-b8416e239331 to /host/opt/cni/bin/\\\\n2025-11-24T17:03:48Z [verbose] multus-daemon started\\\\n2025-11-24T17:03:48Z [verbose] Readiness Indicator file check\\\\n2025-11-24T17:04:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:04:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7lghj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8x59s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.834966 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v5p49" err="failed to patch status 
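The kube-multus restart recorded just above (exitCode 1, restartCount 1) is a downstream symptom: the daemon polls for a readiness indicator file that the default network plugin writes once it is up, and gave up when the poll timed out. A stdlib-only Go sketch of that PollImmediate-style wait (the file path is copied from the log; the one-second interval and 45-second timeout are assumptions, not multus's actual settings):

// waitreadiness.go - a sketch of the readiness-indicator wait implied by
// "still waiting for readinessindicatorfile @ ...". Interval and timeout
// are illustrative assumptions.
package main

import (
	"fmt"
	"os"
	"time"
)

func waitForFile(path string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if _, err := os.Stat(path); err == nil {
			return nil // indicator file exists: default network is ready
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	path := "/host/run/multus/cni/net.d/10-ovn-kubernetes.conf"
	if err := waitForFile(path, time.Second, 45*time.Second); err != nil {
		fmt.Printf("have you checked that your default network is ready? %v\n", err)
		os.Exit(1)
	}
}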
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15207e5d-cdbd-432f-bef7-cfb6992808f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5d2e2855ce5ea0840ac6811bd92c69baafd79bc2f2dde723ceb21bd3093c663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1cb219143a3e8cf7e6aa958a69f9105e21df4d5a0c322f4d8e7a2c277ca249bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c314a5b415f26475c7a647bc594c1a5d13063c48ce86fd40caf67aebbbd1f748\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d45532c9ef99da951e31c4ffd5426fded86264e487ab4c73a4ebec1203add53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0babe021e83da538ad49412c4cfb3182b80d9c26cd64e59bc1becf12d62bbb57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d63aa7ede07e54dac578aaf421454fdc8b1efd5bc86c4aac5cebadd43f7f3459\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://331da95655f1d01cc85a10cb2f3ceeb3aa22e1db92280ac21b753e9d2eb24592\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6jz5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v5p49\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.870475 4760 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" err="failed to patch status 
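Every "Node became not ready" entry in this log carries the same KubeletNotReady message: no CNI configuration file in /etc/kubernetes/cni/net.d/. Until the ovnkube-controller, shown crashing in the ovnkube-node status payload that follows, writes its config into that directory, the runtime reports NetworkReady=false and the node stays NotReady. A rough Go sketch of the libcni-style directory scan behind that message (the directory is from the log; the extension list mirrors libcni's defaults, stated here as an assumption rather than quoted from the source):

// cnicheck.go - a sketch of the check behind "no CNI configuration file
// in /etc/kubernetes/cni/net.d/": scan the conf dir for CNI config files.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func confFiles(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var found []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		// Assumed extension set, modeled on libcni's defaults.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			found = append(found, filepath.Join(dir, e.Name()))
		}
	}
	return found, nil
}

func main() {
	files, err := confFiles("/etc/kubernetes/cni/net.d/")
	if err != nil || len(files) == 0 {
		fmt.Println("container runtime network not ready: no CNI configuration file")
		return
	}
	fmt.Println("CNI configs:", files)
}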
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T17:03:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T17:04:51Z\\\",\\\"message\\\":\\\"rc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:04:51Z is after 2025-08-24T17:21:41Z]\\\\nI1124 17:04:51.500188 6871 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T17:04:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T17:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T17:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T17:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85wkh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T17:03:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-t55f2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T17:05:05Z is after 2025-08-24T17:21:41Z" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.898051 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.898114 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.898135 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.898161 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:05 crc kubenswrapper[4760]: I1124 17:05:05.898179 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:05Z","lastTransitionTime":"2025-11-24T17:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.001151 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.001227 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.001251 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.001287 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.001314 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:06Z","lastTransitionTime":"2025-11-24T17:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.104181 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.104275 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.104297 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.104330 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.104352 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:06Z","lastTransitionTime":"2025-11-24T17:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.207805 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.207872 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.207890 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.207917 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.207940 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:06Z","lastTransitionTime":"2025-11-24T17:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.310910 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.310988 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.311039 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.311074 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.311097 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:06Z","lastTransitionTime":"2025-11-24T17:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.415074 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.415143 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.415169 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.415204 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.415230 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:06Z","lastTransitionTime":"2025-11-24T17:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.466236 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.466286 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.466397 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:06 crc kubenswrapper[4760]: E1124 17:05:06.466750 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:06 crc kubenswrapper[4760]: E1124 17:05:06.466985 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:06 crc kubenswrapper[4760]: E1124 17:05:06.467161 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.518429 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.518491 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.518514 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.518545 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.518603 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:06Z","lastTransitionTime":"2025-11-24T17:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.622105 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.622151 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.622161 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.622178 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.622191 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:06Z","lastTransitionTime":"2025-11-24T17:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.726807 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.726919 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.726974 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.727039 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.727067 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:06Z","lastTransitionTime":"2025-11-24T17:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.830118 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.830283 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.830307 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.830332 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.830351 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:06Z","lastTransitionTime":"2025-11-24T17:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.934717 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.934769 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.934789 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.934812 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:06 crc kubenswrapper[4760]: I1124 17:05:06.934832 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:06Z","lastTransitionTime":"2025-11-24T17:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.038754 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.038808 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.038827 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.038852 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.038870 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:07Z","lastTransitionTime":"2025-11-24T17:05:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.142419 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.142481 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.142499 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.142525 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.142543 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:07Z","lastTransitionTime":"2025-11-24T17:05:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.246333 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.246384 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.246400 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.246425 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.246443 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:07Z","lastTransitionTime":"2025-11-24T17:05:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.349253 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.349361 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.349380 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.349408 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.349428 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:07Z","lastTransitionTime":"2025-11-24T17:05:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.452315 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.452428 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.452447 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.452474 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.452493 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:07Z","lastTransitionTime":"2025-11-24T17:05:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.465996 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:07 crc kubenswrapper[4760]: E1124 17:05:07.466367 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.467596 4760 scope.go:117] "RemoveContainer" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:05:07 crc kubenswrapper[4760]: E1124 17:05:07.467872 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.555553 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.555617 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.555636 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.555659 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.555678 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:07Z","lastTransitionTime":"2025-11-24T17:05:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.658909 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.658974 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.658990 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.659040 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.659058 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:07Z","lastTransitionTime":"2025-11-24T17:05:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.762530 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.762616 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.762639 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.762672 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.762699 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:07Z","lastTransitionTime":"2025-11-24T17:05:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.866569 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.866665 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.866696 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.866732 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.866758 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:07Z","lastTransitionTime":"2025-11-24T17:05:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.970192 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.970263 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.970286 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.970315 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:07 crc kubenswrapper[4760]: I1124 17:05:07.970339 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:07Z","lastTransitionTime":"2025-11-24T17:05:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.383314 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.383386 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.383413 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.383445 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.383510 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:08Z","lastTransitionTime":"2025-11-24T17:05:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.466077 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.466697 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.466858 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:05:08 crc kubenswrapper[4760]: E1124 17:05:08.466961 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:05:08 crc kubenswrapper[4760]: E1124 17:05:08.467227 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:05:08 crc kubenswrapper[4760]: E1124 17:05:08.467537 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9"
pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.486793 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.486855 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.486879 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.486910 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.486934 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:08Z","lastTransitionTime":"2025-11-24T17:05:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.590456 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.590514 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.590530 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.590554 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:08 crc kubenswrapper[4760]: I1124 17:05:08.590572 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:08Z","lastTransitionTime":"2025-11-24T17:05:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.315136 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.315196 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.315218 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.315248 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.315270 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:09Z","lastTransitionTime":"2025-11-24T17:05:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.465886 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:05:09 crc kubenswrapper[4760]: E1124 17:05:09.466122 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.521916 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.522034 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.522057 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.522082 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.522101 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:09Z","lastTransitionTime":"2025-11-24T17:05:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.625432 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.625486 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.625504 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.625526 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:09 crc kubenswrapper[4760]: I1124 17:05:09.625544 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:09Z","lastTransitionTime":"2025-11-24T17:05:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 17:05:10 crc kubenswrapper[4760]: I1124 17:05:10.347281 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:10 crc kubenswrapper[4760]: I1124 17:05:10.347346 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:10 crc kubenswrapper[4760]: I1124 17:05:10.347372 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:10 crc kubenswrapper[4760]: I1124 17:05:10.347401 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:10 crc kubenswrapper[4760]: I1124 17:05:10.347425 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:10Z","lastTransitionTime":"2025-11-24T17:05:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:10 crc kubenswrapper[4760]: I1124 17:05:10.465517 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:05:10 crc kubenswrapper[4760]: I1124 17:05:10.465528 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:05:10 crc kubenswrapper[4760]: E1124 17:05:10.465674 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:05:10 crc kubenswrapper[4760]: I1124 17:05:10.465774 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:05:10 crc kubenswrapper[4760]: E1124 17:05:10.465921 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9"
Nov 24 17:05:10 crc kubenswrapper[4760]: E1124 17:05:10.466358 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:05:11 crc kubenswrapper[4760]: I1124 17:05:11.383323 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:11 crc kubenswrapper[4760]: I1124 17:05:11.383384 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:11 crc kubenswrapper[4760]: I1124 17:05:11.383401 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:11 crc kubenswrapper[4760]: I1124 17:05:11.383424 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:11 crc kubenswrapper[4760]: I1124 17:05:11.383441 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:11Z","lastTransitionTime":"2025-11-24T17:05:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 17:05:11 crc kubenswrapper[4760]: I1124 17:05:11.465936 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 17:05:11 crc kubenswrapper[4760]: E1124 17:05:11.466229 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.465450 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg"
Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.465555 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:05:12 crc kubenswrapper[4760]: E1124 17:05:12.465676 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9"
Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.465734 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 17:05:12 crc kubenswrapper[4760]: E1124 17:05:12.466615 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 17:05:12 crc kubenswrapper[4760]: E1124 17:05:12.466719 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.485194 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.521031 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.521124 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.521143 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.521169 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.521189 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:12Z","lastTransitionTime":"2025-11-24T17:05:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.624271 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.624359 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.624377 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.624406 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.624437 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:12Z","lastTransitionTime":"2025-11-24T17:05:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.727140 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.727207 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.727226 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.727252 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.727271 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:12Z","lastTransitionTime":"2025-11-24T17:05:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.831072 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.831175 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.831201 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.831233 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.831254 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:12Z","lastTransitionTime":"2025-11-24T17:05:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.934867 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.934935 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.934953 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.934981 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:12 crc kubenswrapper[4760]: I1124 17:05:12.935000 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:12Z","lastTransitionTime":"2025-11-24T17:05:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.038090 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.038176 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.038200 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.038227 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.038245 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:13Z","lastTransitionTime":"2025-11-24T17:05:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.141503 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.141597 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.141646 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.141675 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.141693 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:13Z","lastTransitionTime":"2025-11-24T17:05:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.245284 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.245397 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.245417 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.245446 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.245465 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:13Z","lastTransitionTime":"2025-11-24T17:05:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.349795 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.349856 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.349874 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.349900 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.349919 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:13Z","lastTransitionTime":"2025-11-24T17:05:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.453422 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.453518 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.453536 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.453563 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.453580 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:13Z","lastTransitionTime":"2025-11-24T17:05:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.466279 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:13 crc kubenswrapper[4760]: E1124 17:05:13.466633 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.556587 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.556660 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.556680 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.556707 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.556732 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:13Z","lastTransitionTime":"2025-11-24T17:05:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.660398 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.660467 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.660488 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.660514 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.660531 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:13Z","lastTransitionTime":"2025-11-24T17:05:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.764472 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.764544 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.764562 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.764588 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.764605 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:13Z","lastTransitionTime":"2025-11-24T17:05:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.868094 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.868181 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.868200 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.868227 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.868247 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:13Z","lastTransitionTime":"2025-11-24T17:05:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.971781 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.971851 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.971873 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.971899 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:13 crc kubenswrapper[4760]: I1124 17:05:13.971918 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:13Z","lastTransitionTime":"2025-11-24T17:05:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.075156 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.075222 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.075241 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.075268 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.075286 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:14Z","lastTransitionTime":"2025-11-24T17:05:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.178639 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.178697 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.178717 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.178742 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.178764 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:14Z","lastTransitionTime":"2025-11-24T17:05:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.281969 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.282090 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.282117 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.282151 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.282175 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:14Z","lastTransitionTime":"2025-11-24T17:05:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.385695 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.385770 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.385792 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.385858 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.385881 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:14Z","lastTransitionTime":"2025-11-24T17:05:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.466211 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.466316 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.466321 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:14 crc kubenswrapper[4760]: E1124 17:05:14.466435 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:14 crc kubenswrapper[4760]: E1124 17:05:14.466614 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:14 crc kubenswrapper[4760]: E1124 17:05:14.467159 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.490094 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.490164 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.490184 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.490213 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.490234 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:14Z","lastTransitionTime":"2025-11-24T17:05:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.573517 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.573571 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.573586 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.573606 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.573623 4760 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T17:05:14Z","lastTransitionTime":"2025-11-24T17:05:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.653455 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh"] Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.654044 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.657254 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.657621 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.658082 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.659145 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.734596 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=86.734573353 podStartE2EDuration="1m26.734573353s" podCreationTimestamp="2025-11-24 17:03:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:14.734296105 +0000 UTC m=+110.057177695" watchObservedRunningTime="2025-11-24 17:05:14.734573353 +0000 UTC m=+110.057454903" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.734916 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=2.734910123 podStartE2EDuration="2.734910123s" podCreationTimestamp="2025-11-24 17:05:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:14.716169291 +0000 UTC m=+110.039050881" watchObservedRunningTime="2025-11-24 17:05:14.734910123 +0000 UTC m=+110.057791663" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.768533 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podStartSLOduration=89.768516756 podStartE2EDuration="1m29.768516756s" podCreationTimestamp="2025-11-24 17:03:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:14.768369041 +0000 UTC m=+110.091250611" watchObservedRunningTime="2025-11-24 17:05:14.768516756 +0000 UTC m=+110.091398306" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.795758 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gjlbz" podStartSLOduration=87.795731917 podStartE2EDuration="1m27.795731917s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:14.779770025 +0000 UTC m=+110.102651585" watchObservedRunningTime="2025-11-24 17:05:14.795731917 +0000 UTC m=+110.118613467" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.796129 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=89.796122458 podStartE2EDuration="1m29.796122458s" podCreationTimestamp="2025-11-24 17:03:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:14.795902792 +0000 UTC m=+110.118784392" watchObservedRunningTime="2025-11-24 17:05:14.796122458 +0000 UTC m=+110.119004188" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.804642 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/c46f9a50-c042-4330-87ad-6b54f95bdd2e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.804733 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c46f9a50-c042-4330-87ad-6b54f95bdd2e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.804767 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c46f9a50-c042-4330-87ad-6b54f95bdd2e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.804825 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/c46f9a50-c042-4330-87ad-6b54f95bdd2e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.804860 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c46f9a50-c042-4330-87ad-6b54f95bdd2e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.825388 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-8x59s" podStartSLOduration=88.825369477 podStartE2EDuration="1m28.825369477s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:14.824964466 +0000 UTC m=+110.147846016" watchObservedRunningTime="2025-11-24 17:05:14.825369477 +0000 UTC m=+110.148251027" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.843478 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-v5p49" podStartSLOduration=88.8434572 podStartE2EDuration="1m28.8434572s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:14.842213565 +0000 UTC m=+110.165095185" 
watchObservedRunningTime="2025-11-24 17:05:14.8434572 +0000 UTC m=+110.166338750" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.875268 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=44.875243011 podStartE2EDuration="44.875243011s" podCreationTimestamp="2025-11-24 17:04:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:14.874418968 +0000 UTC m=+110.197300538" watchObservedRunningTime="2025-11-24 17:05:14.875243011 +0000 UTC m=+110.198124571" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.899854 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-49579" podStartSLOduration=89.899835509 podStartE2EDuration="1m29.899835509s" podCreationTimestamp="2025-11-24 17:03:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:14.899316394 +0000 UTC m=+110.222197954" watchObservedRunningTime="2025-11-24 17:05:14.899835509 +0000 UTC m=+110.222717059" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.905777 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/c46f9a50-c042-4330-87ad-6b54f95bdd2e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.905864 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c46f9a50-c042-4330-87ad-6b54f95bdd2e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.905891 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c46f9a50-c042-4330-87ad-6b54f95bdd2e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.905919 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/c46f9a50-c042-4330-87ad-6b54f95bdd2e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.905919 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/c46f9a50-c042-4330-87ad-6b54f95bdd2e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.905943 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"service-ca\" (UniqueName: \"kubernetes.io/configmap/c46f9a50-c042-4330-87ad-6b54f95bdd2e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.906079 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/c46f9a50-c042-4330-87ad-6b54f95bdd2e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.907049 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c46f9a50-c042-4330-87ad-6b54f95bdd2e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.914925 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c46f9a50-c042-4330-87ad-6b54f95bdd2e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.920488 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c46f9a50-c042-4330-87ad-6b54f95bdd2e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-pw6dh\" (UID: \"c46f9a50-c042-4330-87ad-6b54f95bdd2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.928380 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=59.928362268 podStartE2EDuration="59.928362268s" podCreationTimestamp="2025-11-24 17:04:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:14.927418311 +0000 UTC m=+110.250299891" watchObservedRunningTime="2025-11-24 17:05:14.928362268 +0000 UTC m=+110.251243818" Nov 24 17:05:14 crc kubenswrapper[4760]: I1124 17:05:14.979133 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" Nov 24 17:05:15 crc kubenswrapper[4760]: I1124 17:05:15.004178 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-vx8zv" podStartSLOduration=89.004158197 podStartE2EDuration="1m29.004158197s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:15.0039354 +0000 UTC m=+110.326816970" watchObservedRunningTime="2025-11-24 17:05:15.004158197 +0000 UTC m=+110.327039767" Nov 24 17:05:15 crc kubenswrapper[4760]: I1124 17:05:15.185195 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" event={"ID":"c46f9a50-c042-4330-87ad-6b54f95bdd2e","Type":"ContainerStarted","Data":"995877ea18b1a7267a100d2aa4c22d054433c57298b9ffda394d310f7d21baae"} Nov 24 17:05:15 crc kubenswrapper[4760]: I1124 17:05:15.185590 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" event={"ID":"c46f9a50-c042-4330-87ad-6b54f95bdd2e","Type":"ContainerStarted","Data":"0f402dd44de047155edfa03787580669d71aef09f70b33e31c437ad8f3e6c69b"} Nov 24 17:05:15 crc kubenswrapper[4760]: I1124 17:05:15.206400 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-pw6dh" podStartSLOduration=89.206366749 podStartE2EDuration="1m29.206366749s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:15.204930769 +0000 UTC m=+110.527812349" watchObservedRunningTime="2025-11-24 17:05:15.206366749 +0000 UTC m=+110.529248339" Nov 24 17:05:15 crc kubenswrapper[4760]: I1124 17:05:15.466183 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:15 crc kubenswrapper[4760]: E1124 17:05:15.468363 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:16 crc kubenswrapper[4760]: I1124 17:05:16.465818 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:16 crc kubenswrapper[4760]: I1124 17:05:16.465888 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:16 crc kubenswrapper[4760]: I1124 17:05:16.465907 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:16 crc kubenswrapper[4760]: E1124 17:05:16.465970 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:16 crc kubenswrapper[4760]: E1124 17:05:16.466117 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:16 crc kubenswrapper[4760]: E1124 17:05:16.466229 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:17 crc kubenswrapper[4760]: I1124 17:05:17.466101 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:17 crc kubenswrapper[4760]: E1124 17:05:17.466619 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:18 crc kubenswrapper[4760]: I1124 17:05:18.465519 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:18 crc kubenswrapper[4760]: I1124 17:05:18.465632 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:18 crc kubenswrapper[4760]: I1124 17:05:18.465660 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:18 crc kubenswrapper[4760]: E1124 17:05:18.465791 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:18 crc kubenswrapper[4760]: E1124 17:05:18.465964 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:18 crc kubenswrapper[4760]: E1124 17:05:18.466192 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:19 crc kubenswrapper[4760]: I1124 17:05:19.466042 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:19 crc kubenswrapper[4760]: E1124 17:05:19.466261 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:20 crc kubenswrapper[4760]: I1124 17:05:20.465805 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:20 crc kubenswrapper[4760]: I1124 17:05:20.465847 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:20 crc kubenswrapper[4760]: E1124 17:05:20.466057 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:20 crc kubenswrapper[4760]: I1124 17:05:20.466155 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:20 crc kubenswrapper[4760]: E1124 17:05:20.466456 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:20 crc kubenswrapper[4760]: E1124 17:05:20.466563 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:20 crc kubenswrapper[4760]: I1124 17:05:20.467551 4760 scope.go:117] "RemoveContainer" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:05:20 crc kubenswrapper[4760]: E1124 17:05:20.467783 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-t55f2_openshift-ovn-kubernetes(a1ccc7f2-1c1b-42b4-aac5-a9865757a92b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" Nov 24 17:05:21 crc kubenswrapper[4760]: I1124 17:05:21.210151 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/1.log" Nov 24 17:05:21 crc kubenswrapper[4760]: I1124 17:05:21.210864 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/0.log" Nov 24 17:05:21 crc kubenswrapper[4760]: I1124 17:05:21.210955 4760 generic.go:334] "Generic (PLEG): container finished" podID="ea01e72c-3c1c-465f-a4cb-90eb34c2f871" containerID="00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea" exitCode=1 Nov 24 17:05:21 crc kubenswrapper[4760]: I1124 17:05:21.211041 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8x59s" event={"ID":"ea01e72c-3c1c-465f-a4cb-90eb34c2f871","Type":"ContainerDied","Data":"00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea"} Nov 24 17:05:21 crc kubenswrapper[4760]: I1124 17:05:21.211134 4760 scope.go:117] "RemoveContainer" containerID="ca4f7c87ec9c138b09d2991debcc0011ec4fb9bb46be4ab7ea157c900ad6894f" Nov 24 17:05:21 crc kubenswrapper[4760]: I1124 17:05:21.211886 4760 scope.go:117] "RemoveContainer" containerID="00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea" Nov 24 17:05:21 crc kubenswrapper[4760]: E1124 17:05:21.212894 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-8x59s_openshift-multus(ea01e72c-3c1c-465f-a4cb-90eb34c2f871)\"" pod="openshift-multus/multus-8x59s" podUID="ea01e72c-3c1c-465f-a4cb-90eb34c2f871" Nov 24 17:05:21 crc kubenswrapper[4760]: I1124 17:05:21.465822 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:21 crc kubenswrapper[4760]: E1124 17:05:21.466062 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:22 crc kubenswrapper[4760]: I1124 17:05:22.216917 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/1.log" Nov 24 17:05:22 crc kubenswrapper[4760]: I1124 17:05:22.465400 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:22 crc kubenswrapper[4760]: I1124 17:05:22.465459 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:22 crc kubenswrapper[4760]: I1124 17:05:22.465410 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:22 crc kubenswrapper[4760]: E1124 17:05:22.465663 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:22 crc kubenswrapper[4760]: E1124 17:05:22.465803 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:22 crc kubenswrapper[4760]: E1124 17:05:22.465937 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:23 crc kubenswrapper[4760]: I1124 17:05:23.465929 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:23 crc kubenswrapper[4760]: E1124 17:05:23.466195 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:24 crc kubenswrapper[4760]: I1124 17:05:24.466348 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:24 crc kubenswrapper[4760]: I1124 17:05:24.466384 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:24 crc kubenswrapper[4760]: I1124 17:05:24.466355 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:24 crc kubenswrapper[4760]: E1124 17:05:24.466558 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:24 crc kubenswrapper[4760]: E1124 17:05:24.466827 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:24 crc kubenswrapper[4760]: E1124 17:05:24.466719 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:25 crc kubenswrapper[4760]: I1124 17:05:25.466199 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:25 crc kubenswrapper[4760]: E1124 17:05:25.470984 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:25 crc kubenswrapper[4760]: E1124 17:05:25.488672 4760 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 24 17:05:25 crc kubenswrapper[4760]: E1124 17:05:25.571258 4760 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 17:05:26 crc kubenswrapper[4760]: I1124 17:05:26.466209 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:26 crc kubenswrapper[4760]: E1124 17:05:26.466681 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:26 crc kubenswrapper[4760]: I1124 17:05:26.466493 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:26 crc kubenswrapper[4760]: E1124 17:05:26.467356 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:26 crc kubenswrapper[4760]: I1124 17:05:26.466403 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:26 crc kubenswrapper[4760]: E1124 17:05:26.467549 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:27 crc kubenswrapper[4760]: I1124 17:05:27.466141 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:27 crc kubenswrapper[4760]: E1124 17:05:27.467180 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:28 crc kubenswrapper[4760]: I1124 17:05:28.466117 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:28 crc kubenswrapper[4760]: I1124 17:05:28.466118 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:28 crc kubenswrapper[4760]: E1124 17:05:28.466321 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:28 crc kubenswrapper[4760]: E1124 17:05:28.466502 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:28 crc kubenswrapper[4760]: I1124 17:05:28.466784 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:28 crc kubenswrapper[4760]: E1124 17:05:28.467107 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:29 crc kubenswrapper[4760]: I1124 17:05:29.466462 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:29 crc kubenswrapper[4760]: E1124 17:05:29.467382 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:30 crc kubenswrapper[4760]: I1124 17:05:30.465713 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:30 crc kubenswrapper[4760]: I1124 17:05:30.465835 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:30 crc kubenswrapper[4760]: E1124 17:05:30.465922 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:30 crc kubenswrapper[4760]: I1124 17:05:30.465713 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:30 crc kubenswrapper[4760]: E1124 17:05:30.466060 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:30 crc kubenswrapper[4760]: E1124 17:05:30.466191 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:30 crc kubenswrapper[4760]: E1124 17:05:30.573185 4760 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 17:05:31 crc kubenswrapper[4760]: I1124 17:05:31.466456 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:31 crc kubenswrapper[4760]: E1124 17:05:31.467225 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:32 crc kubenswrapper[4760]: I1124 17:05:32.465843 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:32 crc kubenswrapper[4760]: I1124 17:05:32.465862 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:32 crc kubenswrapper[4760]: I1124 17:05:32.465934 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:32 crc kubenswrapper[4760]: E1124 17:05:32.466529 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:32 crc kubenswrapper[4760]: E1124 17:05:32.466641 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:32 crc kubenswrapper[4760]: E1124 17:05:32.466831 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:32 crc kubenswrapper[4760]: I1124 17:05:32.466958 4760 scope.go:117] "RemoveContainer" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:05:33 crc kubenswrapper[4760]: I1124 17:05:33.262746 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/3.log" Nov 24 17:05:33 crc kubenswrapper[4760]: I1124 17:05:33.266509 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerStarted","Data":"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62"} Nov 24 17:05:33 crc kubenswrapper[4760]: I1124 17:05:33.266888 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:05:33 crc kubenswrapper[4760]: I1124 17:05:33.301648 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podStartSLOduration=107.301627456 podStartE2EDuration="1m47.301627456s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:33.299892087 +0000 UTC m=+128.622773647" watchObservedRunningTime="2025-11-24 17:05:33.301627456 +0000 UTC m=+128.624509016" Nov 24 17:05:33 crc kubenswrapper[4760]: I1124 17:05:33.465750 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:33 crc kubenswrapper[4760]: E1124 17:05:33.465936 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:33 crc kubenswrapper[4760]: I1124 17:05:33.503859 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-dz6vg"] Nov 24 17:05:33 crc kubenswrapper[4760]: I1124 17:05:33.504112 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:33 crc kubenswrapper[4760]: E1124 17:05:33.504306 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:34 crc kubenswrapper[4760]: I1124 17:05:34.465800 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:34 crc kubenswrapper[4760]: I1124 17:05:34.465884 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:34 crc kubenswrapper[4760]: E1124 17:05:34.465952 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:34 crc kubenswrapper[4760]: E1124 17:05:34.466127 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:35 crc kubenswrapper[4760]: I1124 17:05:35.465496 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:35 crc kubenswrapper[4760]: I1124 17:05:35.465547 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:35 crc kubenswrapper[4760]: E1124 17:05:35.467888 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:35 crc kubenswrapper[4760]: E1124 17:05:35.467994 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:35 crc kubenswrapper[4760]: E1124 17:05:35.574841 4760 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 17:05:36 crc kubenswrapper[4760]: I1124 17:05:36.466337 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:36 crc kubenswrapper[4760]: I1124 17:05:36.466337 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:36 crc kubenswrapper[4760]: E1124 17:05:36.467062 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:36 crc kubenswrapper[4760]: I1124 17:05:36.467151 4760 scope.go:117] "RemoveContainer" containerID="00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea" Nov 24 17:05:36 crc kubenswrapper[4760]: E1124 17:05:36.467154 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:37 crc kubenswrapper[4760]: I1124 17:05:37.284556 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/1.log" Nov 24 17:05:37 crc kubenswrapper[4760]: I1124 17:05:37.284926 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8x59s" event={"ID":"ea01e72c-3c1c-465f-a4cb-90eb34c2f871","Type":"ContainerStarted","Data":"70516c5f47799f0ece36f692634fe011328322a1cc75c42e9af99e7a48eceacc"} Nov 24 17:05:37 crc kubenswrapper[4760]: I1124 17:05:37.399885 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:05:37 crc kubenswrapper[4760]: I1124 17:05:37.465678 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:37 crc kubenswrapper[4760]: I1124 17:05:37.465773 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:37 crc kubenswrapper[4760]: E1124 17:05:37.465868 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:37 crc kubenswrapper[4760]: E1124 17:05:37.465968 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:38 crc kubenswrapper[4760]: I1124 17:05:38.465866 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:38 crc kubenswrapper[4760]: I1124 17:05:38.465894 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:38 crc kubenswrapper[4760]: E1124 17:05:38.466124 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:38 crc kubenswrapper[4760]: E1124 17:05:38.466281 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:39 crc kubenswrapper[4760]: I1124 17:05:39.465983 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:39 crc kubenswrapper[4760]: I1124 17:05:39.466062 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:39 crc kubenswrapper[4760]: E1124 17:05:39.466359 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 17:05:39 crc kubenswrapper[4760]: E1124 17:05:39.466505 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-dz6vg" podUID="e462626d-5645-4be7-89b4-383a4cde08f9" Nov 24 17:05:40 crc kubenswrapper[4760]: I1124 17:05:40.466268 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:40 crc kubenswrapper[4760]: I1124 17:05:40.466373 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:40 crc kubenswrapper[4760]: E1124 17:05:40.466464 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 17:05:40 crc kubenswrapper[4760]: E1124 17:05:40.466537 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 17:05:41 crc kubenswrapper[4760]: I1124 17:05:41.465356 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:05:41 crc kubenswrapper[4760]: I1124 17:05:41.465422 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:41 crc kubenswrapper[4760]: I1124 17:05:41.468649 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 24 17:05:41 crc kubenswrapper[4760]: I1124 17:05:41.468917 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 24 17:05:41 crc kubenswrapper[4760]: I1124 17:05:41.468908 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 24 17:05:41 crc kubenswrapper[4760]: I1124 17:05:41.469555 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 24 17:05:42 crc kubenswrapper[4760]: I1124 17:05:42.465474 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:42 crc kubenswrapper[4760]: I1124 17:05:42.465710 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:42 crc kubenswrapper[4760]: I1124 17:05:42.468690 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 24 17:05:42 crc kubenswrapper[4760]: I1124 17:05:42.468797 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.194517 4760 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.249380 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bmf26"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.250111 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.253143 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-jrf7z"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.253802 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.254067 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.256139 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.256542 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.257150 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.257257 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.257508 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.258056 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.258738 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.258969 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.261558 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.262645 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.264645 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.271960 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.272330 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.272768 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.276360 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.276732 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.276804 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.282301 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295483 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7-available-featuregates\") pod \"openshift-config-operator-7777fb866f-nc4ft\" (UID: \"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295544 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-config\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295578 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-config\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295610 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htpzm\" (UniqueName: \"kubernetes.io/projected/dd6a2056-7948-4823-bb36-f9e650d649db-kube-api-access-htpzm\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295636 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd6a2056-7948-4823-bb36-f9e650d649db-serving-cert\") 
pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295665 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-image-import-ca\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295687 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-audit\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295705 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-etcd-serving-ca\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295739 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7-serving-cert\") pod \"openshift-config-operator-7777fb866f-nc4ft\" (UID: \"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295758 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgj9m\" (UniqueName: \"kubernetes.io/projected/d5dec400-42dd-4869-a1eb-233e55cc120f-kube-api-access-vgj9m\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295781 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-config\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295819 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/474f6772-b7de-416d-bf20-9cd6326bfb37-serving-cert\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295839 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-client-ca\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 
17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295858 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/474f6772-b7de-416d-bf20-9cd6326bfb37-etcd-client\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295892 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvcwj\" (UniqueName: \"kubernetes.io/projected/95f656bb-1008-45d4-a2e2-484e574e7767-kube-api-access-fvcwj\") pod \"cluster-samples-operator-665b6dd947-nnqqr\" (UID: \"95f656bb-1008-45d4-a2e2-484e574e7767\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295913 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-client-ca\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295932 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/474f6772-b7de-416d-bf20-9cd6326bfb37-encryption-config\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.295952 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/95f656bb-1008-45d4-a2e2-484e574e7767-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nnqqr\" (UID: \"95f656bb-1008-45d4-a2e2-484e574e7767\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.296137 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzmlg\" (UniqueName: \"kubernetes.io/projected/474f6772-b7de-416d-bf20-9cd6326bfb37-kube-api-access-pzmlg\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.296269 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5dec400-42dd-4869-a1eb-233e55cc120f-serving-cert\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.296330 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-trusted-ca-bundle\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 
17:05:45.296467 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tddbf\" (UniqueName: \"kubernetes.io/projected/ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7-kube-api-access-tddbf\") pod \"openshift-config-operator-7777fb866f-nc4ft\" (UID: \"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.296510 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/474f6772-b7de-416d-bf20-9cd6326bfb37-node-pullsecrets\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.296606 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.296662 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/474f6772-b7de-416d-bf20-9cd6326bfb37-audit-dir\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.296683 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.296879 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.297054 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.297462 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.297568 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.297725 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.297775 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.297898 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.298109 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.298140 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 24 
17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.298290 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.298326 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.298523 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.298578 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.298749 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.298768 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.306173 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.306234 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.308995 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rcsnv"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.309970 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.311893 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.314200 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.314532 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.314589 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.314715 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.315653 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.316953 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-29k4r"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.317241 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.322482 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.322840 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.322927 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.323109 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.323275 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.323644 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-qr42v"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.324249 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.324721 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rt9dn"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.324872 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.325212 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.325478 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.326293 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-2stcx"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.326772 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-2stcx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.326910 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.327198 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.329539 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bmf26"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.331145 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.331864 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.332670 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.333253 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.334726 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zg8fk"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.335309 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.335673 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.336065 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-c8j2v"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.336078 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.336439 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.336674 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.336764 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.336902 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.337047 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.337128 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.338087 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.338730 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.342952 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.343143 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.343246 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.346041 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2qfzh"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.347576 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.348951 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.349859 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.349960 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.350489 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.350574 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.350792 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.350919 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.351065 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.351089 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.351195 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.351265 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.351341 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.352515 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.352658 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.352743 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.352847 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.352900 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.352945 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353039 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353061 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353098 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353126 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353154 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353201 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353264 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353280 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353321 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 
17:05:45.353332 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353304 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353436 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353619 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353703 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353739 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353747 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353834 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353882 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353913 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.354024 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.354154 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353705 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.354480 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.353707 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.354828 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.359067 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.359111 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-2m97z"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.359569 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.361901 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.376556 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.389107 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.389411 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.389931 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.391828 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-rsr75"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.392734 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397523 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cebb48e4-432a-42f0-9e13-7a11ab680535-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-phspq\" (UID: \"cebb48e4-432a-42f0-9e13-7a11ab680535\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397584 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvcwj\" (UniqueName: \"kubernetes.io/projected/95f656bb-1008-45d4-a2e2-484e574e7767-kube-api-access-fvcwj\") pod \"cluster-samples-operator-665b6dd947-nnqqr\" (UID: \"95f656bb-1008-45d4-a2e2-484e574e7767\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397604 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-client-ca\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397627 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397669 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-etcd-service-ca\") pod 
\"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397691 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/474f6772-b7de-416d-bf20-9cd6326bfb37-encryption-config\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397707 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/212051f7-d251-4005-a25d-ac53d864a70c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4rksk\" (UID: \"212051f7-d251-4005-a25d-ac53d864a70c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397747 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-oauth-serving-cert\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397764 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmszt\" (UniqueName: \"kubernetes.io/projected/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-kube-api-access-nmszt\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397781 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d483076b-151b-465f-beec-94e8b65379ef-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-zb8vd\" (UID: \"d483076b-151b-465f-beec-94e8b65379ef\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397828 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/95f656bb-1008-45d4-a2e2-484e574e7767-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nnqqr\" (UID: \"95f656bb-1008-45d4-a2e2-484e574e7767\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397848 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-machine-approver-tls\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397866 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397903 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd8gg\" (UniqueName: \"kubernetes.io/projected/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-kube-api-access-vd8gg\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397923 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397970 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzmlg\" (UniqueName: \"kubernetes.io/projected/474f6772-b7de-416d-bf20-9cd6326bfb37-kube-api-access-pzmlg\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.397989 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5dec400-42dd-4869-a1eb-233e55cc120f-serving-cert\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.398031 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-trusted-ca-bundle\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.398055 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/53abeab9-107f-44fd-84b7-c641f8583fbd-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.398075 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-audit-policies\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.398110 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-metrics-certs\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.398174 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-etcd-ca\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.398752 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/212051f7-d251-4005-a25d-ac53d864a70c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4rksk\" (UID: \"212051f7-d251-4005-a25d-ac53d864a70c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.399098 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/212051f7-d251-4005-a25d-ac53d864a70c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4rksk\" (UID: \"212051f7-d251-4005-a25d-ac53d864a70c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.399142 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-client-ca\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.399315 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400083 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-trusted-ca-bundle\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400175 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tddbf\" (UniqueName: \"kubernetes.io/projected/ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7-kube-api-access-tddbf\") pod \"openshift-config-operator-7777fb866f-nc4ft\" (UID: \"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400209 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvs4j\" (UniqueName: \"kubernetes.io/projected/cebb48e4-432a-42f0-9e13-7a11ab680535-kube-api-access-pvs4j\") pod \"kube-storage-version-migrator-operator-b67b599dd-phspq\" (UID: \"cebb48e4-432a-42f0-9e13-7a11ab680535\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq" Nov 24 
17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400239 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-config\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400288 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-service-ca-bundle\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400315 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/474f6772-b7de-416d-bf20-9cd6326bfb37-node-pullsecrets\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400339 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400361 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400511 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/474f6772-b7de-416d-bf20-9cd6326bfb37-node-pullsecrets\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400555 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/53abeab9-107f-44fd-84b7-c641f8583fbd-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400576 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-serving-cert\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400612 4760 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-trusted-ca-bundle\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400631 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/308d97cc-5fc4-4a28-883b-bb545c57132b-srv-cert\") pod \"catalog-operator-68c6474976-p46tt\" (UID: \"308d97cc-5fc4-4a28-883b-bb545c57132b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400662 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-serving-cert\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400682 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400702 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/474f6772-b7de-416d-bf20-9cd6326bfb37-audit-dir\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400724 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7-available-featuregates\") pod \"openshift-config-operator-7777fb866f-nc4ft\" (UID: \"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400787 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-config\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400813 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldm65\" (UniqueName: \"kubernetes.io/projected/308d97cc-5fc4-4a28-883b-bb545c57132b-kube-api-access-ldm65\") pod \"catalog-operator-68c6474976-p46tt\" (UID: \"308d97cc-5fc4-4a28-883b-bb545c57132b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400835 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmp4b\" (UniqueName: 
\"kubernetes.io/projected/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-kube-api-access-kmp4b\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400852 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400870 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400887 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/308d97cc-5fc4-4a28-883b-bb545c57132b-profile-collector-cert\") pod \"catalog-operator-68c6474976-p46tt\" (UID: \"308d97cc-5fc4-4a28-883b-bb545c57132b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400949 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-oauth-config\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400969 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-service-ca\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.400983 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-etcd-client\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401026 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-config\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401048 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t52cq\" (UniqueName: 
\"kubernetes.io/projected/53abeab9-107f-44fd-84b7-c641f8583fbd-kube-api-access-t52cq\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401069 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htpzm\" (UniqueName: \"kubernetes.io/projected/dd6a2056-7948-4823-bb36-f9e650d649db-kube-api-access-htpzm\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401128 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/44091a4f-586a-44f5-934d-294bbe4458c0-audit-dir\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401149 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401170 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd6a2056-7948-4823-bb36-f9e650d649db-serving-cert\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401186 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-stats-auth\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401206 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-image-import-ca\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401225 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/53abeab9-107f-44fd-84b7-c641f8583fbd-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401248 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: 
\"kubernetes.io/secret/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-default-certificate\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401267 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-audit\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401336 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-etcd-serving-ca\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401359 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-425ww\" (UniqueName: \"kubernetes.io/projected/44091a4f-586a-44f5-934d-294bbe4458c0-kube-api-access-425ww\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401389 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh8kn\" (UniqueName: \"kubernetes.io/projected/4e26988e-e709-4bf3-81a3-8a4666e7e0da-kube-api-access-mh8kn\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401417 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/843e455c-4df4-4e25-91f1-456b61889db5-config\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401654 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.401761 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.402096 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7-available-featuregates\") pod \"openshift-config-operator-7777fb866f-nc4ft\" (UID: \"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.402876 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" 
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.403482 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.403577 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.403670 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.404505 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-bqtxg"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.405810 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.405990 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5dec400-42dd-4869-a1eb-233e55cc120f-serving-cert\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.406093 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/474f6772-b7de-416d-bf20-9cd6326bfb37-audit-dir\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.406898 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-config\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.407850 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-config\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.408540 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.409685 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-etcd-serving-ca\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.410640 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-audit\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.410721 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7-serving-cert\") pod \"openshift-config-operator-7777fb866f-nc4ft\" (UID: \"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.410760 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-config\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.410859 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgj9m\" (UniqueName: \"kubernetes.io/projected/d5dec400-42dd-4869-a1eb-233e55cc120f-kube-api-access-vgj9m\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.410891 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cebb48e4-432a-42f0-9e13-7a11ab680535-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-phspq\" (UID: \"cebb48e4-432a-42f0-9e13-7a11ab680535\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.410919 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-auth-proxy-config\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.410958 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvkrv\" (UniqueName: \"kubernetes.io/projected/843e455c-4df4-4e25-91f1-456b61889db5-kube-api-access-fvkrv\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.410996 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-service-ca-bundle\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411052 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-config\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411147 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411180 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd686\" (UniqueName: \"kubernetes.io/projected/d483076b-151b-465f-beec-94e8b65379ef-kube-api-access-qd686\") pod \"openshift-controller-manager-operator-756b6f6bc6-zb8vd\" (UID: \"d483076b-151b-465f-beec-94e8b65379ef\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411237 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411277 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-client-ca\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411299 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/474f6772-b7de-416d-bf20-9cd6326bfb37-etcd-client\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411324 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/474f6772-b7de-416d-bf20-9cd6326bfb37-serving-cert\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411352 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411384 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8wwd\" (UniqueName: \"kubernetes.io/projected/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-kube-api-access-p8wwd\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411471 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-serving-cert\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411504 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/843e455c-4df4-4e25-91f1-456b61889db5-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411534 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-config\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.411504 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.412709 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/843e455c-4df4-4e25-91f1-456b61889db5-images\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.412745 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-config\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.412777 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d483076b-151b-465f-beec-94e8b65379ef-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-zb8vd\" (UID: \"d483076b-151b-465f-beec-94e8b65379ef\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.413801 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-image-import-ca\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.416113 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-client-ca\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.416194 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.417188 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.417549 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.418152 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.420149 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/474f6772-b7de-416d-bf20-9cd6326bfb37-config\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.450401 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wwglm"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.450832 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.451201 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.451837 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.453416 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.453639 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.453780 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.454896 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/95f656bb-1008-45d4-a2e2-484e574e7767-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nnqqr\" (UID: \"95f656bb-1008-45d4-a2e2-484e574e7767\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.455805 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/474f6772-b7de-416d-bf20-9cd6326bfb37-serving-cert\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.456034 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd6a2056-7948-4823-bb36-f9e650d649db-serving-cert\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.456110 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7-serving-cert\") pod \"openshift-config-operator-7777fb866f-nc4ft\" (UID: \"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.456349 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/474f6772-b7de-416d-bf20-9cd6326bfb37-encryption-config\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.459228 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/474f6772-b7de-416d-bf20-9cd6326bfb37-etcd-client\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.460362 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-cxr8b"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.461211 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.461258 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.461515 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.461626 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.462784 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.462941 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.462952 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.463990 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.466363 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.473505 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.474574 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.477720 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.479385 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.479983 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.480458 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.480781 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-smj94"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.481087 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.481562 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-jrf7z"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.481689 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.481906 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.481927 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.481942 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.483019 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.483387 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.483403 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.483433 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.487393 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rcsnv"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.487454 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rt9dn"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.487468 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-29k4r"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.487481 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.489922 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-qr42v"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.489957 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.491861 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.491907 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.501820 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.502260 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.506095 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-bqz55"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.509822 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-c8j2v"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.510032 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.510016 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-2stcx"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.511828 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.513321 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt"]
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.513938 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.513970 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514015 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmp4b\" (UniqueName: \"kubernetes.io/projected/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-kube-api-access-kmp4b\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514039 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/308d97cc-5fc4-4a28-883b-bb545c57132b-profile-collector-cert\") pod \"catalog-operator-68c6474976-p46tt\" (UID: \"308d97cc-5fc4-4a28-883b-bb545c57132b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514062 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-oauth-config\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514083 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-service-ca\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514102 4760 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-etcd-client\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514123 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t52cq\" (UniqueName: \"kubernetes.io/projected/53abeab9-107f-44fd-84b7-c641f8583fbd-kube-api-access-t52cq\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514150 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/44091a4f-586a-44f5-934d-294bbe4458c0-audit-dir\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514173 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514201 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-stats-auth\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514228 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/53abeab9-107f-44fd-84b7-c641f8583fbd-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514248 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-default-certificate\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514268 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-425ww\" (UniqueName: \"kubernetes.io/projected/44091a4f-586a-44f5-934d-294bbe4458c0-kube-api-access-425ww\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514294 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh8kn\" (UniqueName: 
\"kubernetes.io/projected/4e26988e-e709-4bf3-81a3-8a4666e7e0da-kube-api-access-mh8kn\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514322 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/843e455c-4df4-4e25-91f1-456b61889db5-config\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514500 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-config\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514542 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514578 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cebb48e4-432a-42f0-9e13-7a11ab680535-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-phspq\" (UID: \"cebb48e4-432a-42f0-9e13-7a11ab680535\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514603 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-auth-proxy-config\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514622 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvkrv\" (UniqueName: \"kubernetes.io/projected/843e455c-4df4-4e25-91f1-456b61889db5-kube-api-access-fvkrv\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514643 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-service-ca-bundle\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.514664 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-service-ca\") pod 
\"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.515107 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-rsr75"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.515809 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd686\" (UniqueName: \"kubernetes.io/projected/d483076b-151b-465f-beec-94e8b65379ef-kube-api-access-qd686\") pod \"openshift-controller-manager-operator-756b6f6bc6-zb8vd\" (UID: \"d483076b-151b-465f-beec-94e8b65379ef\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.516100 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.516272 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.516380 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8wwd\" (UniqueName: \"kubernetes.io/projected/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-kube-api-access-p8wwd\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.516461 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-service-ca\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.516472 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.516485 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-serving-cert\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.516687 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/843e455c-4df4-4e25-91f1-456b61889db5-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.516797 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/843e455c-4df4-4e25-91f1-456b61889db5-images\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.516885 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-config\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.517196 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.517435 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/44091a4f-586a-44f5-934d-294bbe4458c0-audit-dir\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.517615 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.517726 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-oauth-config\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.517770 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-config\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.518231 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/843e455c-4df4-4e25-91f1-456b61889db5-config\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.518328 4760 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-auth-proxy-config\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.518358 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-service-ca-bundle\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.518898 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/843e455c-4df4-4e25-91f1-456b61889db5-images\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.519210 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d483076b-151b-465f-beec-94e8b65379ef-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-zb8vd\" (UID: \"d483076b-151b-465f-beec-94e8b65379ef\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.521790 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-config\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.521826 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cebb48e4-432a-42f0-9e13-7a11ab680535-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-phspq\" (UID: \"cebb48e4-432a-42f0-9e13-7a11ab680535\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.521860 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.521882 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-etcd-service-ca\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.521902 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/212051f7-d251-4005-a25d-ac53d864a70c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4rksk\" (UID: \"212051f7-d251-4005-a25d-ac53d864a70c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.521922 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-machine-approver-tls\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.521938 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.521954 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-oauth-serving-cert\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.521969 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmszt\" (UniqueName: \"kubernetes.io/projected/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-kube-api-access-nmszt\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.521985 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d483076b-151b-465f-beec-94e8b65379ef-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-zb8vd\" (UID: \"d483076b-151b-465f-beec-94e8b65379ef\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522022 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vd8gg\" (UniqueName: \"kubernetes.io/projected/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-kube-api-access-vd8gg\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522057 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522075 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/53abeab9-107f-44fd-84b7-c641f8583fbd-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522093 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-metrics-certs\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522109 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-etcd-ca\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522124 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/212051f7-d251-4005-a25d-ac53d864a70c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4rksk\" (UID: \"212051f7-d251-4005-a25d-ac53d864a70c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522140 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-audit-policies\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522181 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvs4j\" (UniqueName: \"kubernetes.io/projected/cebb48e4-432a-42f0-9e13-7a11ab680535-kube-api-access-pvs4j\") pod \"kube-storage-version-migrator-operator-b67b599dd-phspq\" (UID: \"cebb48e4-432a-42f0-9e13-7a11ab680535\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522188 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522199 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-config\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522219 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-service-ca-bundle\") pod \"router-default-5444994796-2m97z\" (UID: 
\"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522237 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/212051f7-d251-4005-a25d-ac53d864a70c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4rksk\" (UID: \"212051f7-d251-4005-a25d-ac53d864a70c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522267 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522271 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d483076b-151b-465f-beec-94e8b65379ef-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-zb8vd\" (UID: \"d483076b-151b-465f-beec-94e8b65379ef\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522285 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522314 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522335 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/53abeab9-107f-44fd-84b7-c641f8583fbd-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522345 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/53abeab9-107f-44fd-84b7-c641f8583fbd-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522354 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-trusted-ca-bundle\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522411 4760 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-serving-cert\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522436 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/308d97cc-5fc4-4a28-883b-bb545c57132b-srv-cert\") pod \"catalog-operator-68c6474976-p46tt\" (UID: \"308d97cc-5fc4-4a28-883b-bb545c57132b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522471 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-serving-cert\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522500 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldm65\" (UniqueName: \"kubernetes.io/projected/308d97cc-5fc4-4a28-883b-bb545c57132b-kube-api-access-ldm65\") pod \"catalog-operator-68c6474976-p46tt\" (UID: \"308d97cc-5fc4-4a28-883b-bb545c57132b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522585 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.522647 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.523076 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-config\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.523398 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-trusted-ca-bundle\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.523405 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-oauth-serving-cert\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " 
pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.523856 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wwglm"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.524339 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.524755 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/843e455c-4df4-4e25-91f1-456b61889db5-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.524785 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-serving-cert\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.525187 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-config\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.525220 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-audit-policies\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.525797 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.526468 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.527302 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-machine-approver-tls\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.527466 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh"] Nov 24 17:05:45 crc 
kubenswrapper[4760]: I1124 17:05:45.527726 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.527759 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.528725 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-cxr8b"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.529962 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zg8fk"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.531222 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.531899 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.532099 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/53abeab9-107f-44fd-84b7-c641f8583fbd-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.533334 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.534185 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.534556 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.536018 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-serving-cert\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.536074 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-smj94"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 
17:05:45.536455 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.537093 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.537374 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d483076b-151b-465f-beec-94e8b65379ef-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-zb8vd\" (UID: \"d483076b-151b-465f-beec-94e8b65379ef\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.538323 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2qfzh"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.539483 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.541079 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.542283 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-2gzg7"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.543123 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-2gzg7" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.543298 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-bkvpl"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.544217 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-bkvpl" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.544576 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-bqz55"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.545734 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-bqtxg"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.547081 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.548225 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.549344 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.550437 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-bkvpl"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.551577 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.551577 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-rp7d8"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.552332 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rp7d8" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.552788 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rp7d8"] Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.571763 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.591725 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.611877 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.632025 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.652258 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.655678 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cebb48e4-432a-42f0-9e13-7a11ab680535-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-phspq\" (UID: \"cebb48e4-432a-42f0-9e13-7a11ab680535\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.671930 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 
17:05:45.692163 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.699807 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/212051f7-d251-4005-a25d-ac53d864a70c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4rksk\" (UID: \"212051f7-d251-4005-a25d-ac53d864a70c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.712223 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.726045 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/212051f7-d251-4005-a25d-ac53d864a70c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4rksk\" (UID: \"212051f7-d251-4005-a25d-ac53d864a70c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.755943 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.772441 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.781343 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cebb48e4-432a-42f0-9e13-7a11ab680535-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-phspq\" (UID: \"cebb48e4-432a-42f0-9e13-7a11ab680535\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.792028 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.813617 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.832780 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.852703 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.873593 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.882443 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-default-certificate\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.892853 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.903991 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-stats-auth\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.920663 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.933747 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-metrics-certs\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.934082 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.936692 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-service-ca-bundle\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.953213 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.972948 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 24 17:05:45 crc kubenswrapper[4760]: I1124 17:05:45.993170 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.013154 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.026250 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/308d97cc-5fc4-4a28-883b-bb545c57132b-srv-cert\") pod \"catalog-operator-68c6474976-p46tt\" (UID: \"308d97cc-5fc4-4a28-883b-bb545c57132b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.032992 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.039902 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/308d97cc-5fc4-4a28-883b-bb545c57132b-profile-collector-cert\") pod \"catalog-operator-68c6474976-p46tt\" (UID: \"308d97cc-5fc4-4a28-883b-bb545c57132b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.052502 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.072512 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.092283 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.101383 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-etcd-client\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.112105 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.133078 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.149466 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-serving-cert\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.153171 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.159962 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-config\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.172945 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.174562 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-etcd-ca\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.193369 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.195878 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-etcd-service-ca\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.213401 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.280125 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzmlg\" (UniqueName: \"kubernetes.io/projected/474f6772-b7de-416d-bf20-9cd6326bfb37-kube-api-access-pzmlg\") pod \"apiserver-76f77b778f-jrf7z\" (UID: \"474f6772-b7de-416d-bf20-9cd6326bfb37\") " pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.294222 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvcwj\" (UniqueName: \"kubernetes.io/projected/95f656bb-1008-45d4-a2e2-484e574e7767-kube-api-access-fvcwj\") pod \"cluster-samples-operator-665b6dd947-nnqqr\" (UID: \"95f656bb-1008-45d4-a2e2-484e574e7767\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.320423 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tddbf\" (UniqueName: \"kubernetes.io/projected/ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7-kube-api-access-tddbf\") pod \"openshift-config-operator-7777fb866f-nc4ft\" (UID: \"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.339277 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htpzm\" (UniqueName: \"kubernetes.io/projected/dd6a2056-7948-4823-bb36-f9e650d649db-kube-api-access-htpzm\") pod \"controller-manager-879f6c89f-bmf26\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.343350 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.361117 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.372325 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.393646 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.433161 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.440923 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgj9m\" (UniqueName: \"kubernetes.io/projected/d5dec400-42dd-4869-a1eb-233e55cc120f-kube-api-access-vgj9m\") pod \"route-controller-manager-6576b87f9c-xxkwx\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.454048 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.471878 4760 request.go:700] Waited for 1.017379115s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/configmaps?fieldSelector=metadata.name%3Dopenshift-service-ca.crt&limit=500&resourceVersion=0
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.475917 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.493498 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.496108 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.502107 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.514085 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.515259 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.533164 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.537965 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.553378 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.561707 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.572850 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.592234 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.612778 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.641422 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.652887 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.672536 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.692958 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.703173 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-jrf7z"]
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.703899 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bmf26"]
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.713235 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 24 17:05:46 crc kubenswrapper[4760]: W1124 17:05:46.713450 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod474f6772_b7de_416d_bf20_9cd6326bfb37.slice/crio-02029157a4bdbdef33d83f69e29ef44d0318ef2bddd6e7a9e7042d594604ba79 WatchSource:0}: Error finding container 02029157a4bdbdef33d83f69e29ef44d0318ef2bddd6e7a9e7042d594604ba79: Status 404 returned error can't find the container with id 02029157a4bdbdef33d83f69e29ef44d0318ef2bddd6e7a9e7042d594604ba79
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.732690 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.753213 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.754517 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft"]
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.772294 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.792190 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.813993 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.832196 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.852484 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.874490 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.892626 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.913570 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.933586 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.953292 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.971841 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.992520 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.993457 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr"]
Nov 24 17:05:46 crc kubenswrapper[4760]: I1124 17:05:46.995783 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"]
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.013455 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.033748 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.052563 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 24 17:05:47 crc kubenswrapper[4760]: W1124 17:05:47.055220 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5dec400_42dd_4869_a1eb_233e55cc120f.slice/crio-a865858490d9de76c4d230483388da6fa91c5d86a09f15bf5778d120b246f106 WatchSource:0}: Error finding container a865858490d9de76c4d230483388da6fa91c5d86a09f15bf5778d120b246f106: Status 404 returned error can't find the container with id a865858490d9de76c4d230483388da6fa91c5d86a09f15bf5778d120b246f106
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.072299 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.092571 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.113871 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.133565 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.152209 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.172324 4760 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.192526 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.212548 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.254314 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmp4b\" (UniqueName: \"kubernetes.io/projected/ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3-kube-api-access-kmp4b\") pod \"machine-approver-56656f9798-r7tdx\" (UID: \"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.261182 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.266827 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t52cq\" (UniqueName: \"kubernetes.io/projected/53abeab9-107f-44fd-84b7-c641f8583fbd-kube-api-access-t52cq\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.285788 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-425ww\" (UniqueName: \"kubernetes.io/projected/44091a4f-586a-44f5-934d-294bbe4458c0-kube-api-access-425ww\") pod \"oauth-openshift-558db77b4-zg8fk\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.306409 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh8kn\" (UniqueName: \"kubernetes.io/projected/4e26988e-e709-4bf3-81a3-8a4666e7e0da-kube-api-access-mh8kn\") pod \"console-f9d7485db-qr42v\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " pod="openshift-console/console-f9d7485db-qr42v"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.328703 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvkrv\" (UniqueName: \"kubernetes.io/projected/843e455c-4df4-4e25-91f1-456b61889db5-kube-api-access-fvkrv\") pod \"machine-api-operator-5694c8668f-rt9dn\" (UID: \"843e455c-4df4-4e25-91f1-456b61889db5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.332651 4760 generic.go:334] "Generic (PLEG): container finished" podID="474f6772-b7de-416d-bf20-9cd6326bfb37" containerID="bc82f8feaada583b2c64e4bdc4add110b15076059bf987a72535a56b4e08ce47" exitCode=0
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.332710 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" event={"ID":"474f6772-b7de-416d-bf20-9cd6326bfb37","Type":"ContainerDied","Data":"bc82f8feaada583b2c64e4bdc4add110b15076059bf987a72535a56b4e08ce47"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.332736 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" event={"ID":"474f6772-b7de-416d-bf20-9cd6326bfb37","Type":"ContainerStarted","Data":"02029157a4bdbdef33d83f69e29ef44d0318ef2bddd6e7a9e7042d594604ba79"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.336468 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" event={"ID":"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3","Type":"ContainerStarted","Data":"c187f9418e184a3abcd18825f6793d688200ce25aa19e008ed19aa4eb291fec8"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.338906 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" event={"ID":"d5dec400-42dd-4869-a1eb-233e55cc120f","Type":"ContainerStarted","Data":"c8c6c3a48704104fa59028022238086fcc9d1701d6696e775af5b945b28fd3de"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.338931 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" event={"ID":"d5dec400-42dd-4869-a1eb-233e55cc120f","Type":"ContainerStarted","Data":"a865858490d9de76c4d230483388da6fa91c5d86a09f15bf5778d120b246f106"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.339564 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.340912 4760 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-xxkwx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.340945 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" podUID="d5dec400-42dd-4869-a1eb-233e55cc120f" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.348633 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8wwd\" (UniqueName: \"kubernetes.io/projected/f4e5eb55-04d5-4d78-8c6e-73eb5233c269-kube-api-access-p8wwd\") pod \"router-default-5444994796-2m97z\" (UID: \"f4e5eb55-04d5-4d78-8c6e-73eb5233c269\") " pod="openshift-ingress/router-default-5444994796-2m97z"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.350637 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" event={"ID":"dd6a2056-7948-4823-bb36-f9e650d649db","Type":"ContainerStarted","Data":"803b1d9402e56cff176635ddfa7a776a9b21aa8e15f15a67ab430534e4dd3e9e"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.350666 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" event={"ID":"dd6a2056-7948-4823-bb36-f9e650d649db","Type":"ContainerStarted","Data":"fc33cf651d95b3101f872fdf850d2ab4b67e1067e714942659eee0a24dc2f180"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.350679 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.351989 4760 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-bmf26 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body=
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.352040 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" podUID="dd6a2056-7948-4823-bb36-f9e650d649db" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.353837 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr" event={"ID":"95f656bb-1008-45d4-a2e2-484e574e7767","Type":"ContainerStarted","Data":"14c6e30870ad8342990ce97bd86a1ec5ccbfd0e485b94a7f3fcb07d0fdef46b9"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.353876 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr" event={"ID":"95f656bb-1008-45d4-a2e2-484e574e7767","Type":"ContainerStarted","Data":"6de83b036608d570e08f5f1efd3410e8b176b6e24eb4953c232b8c656f6d5927"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.357104 4760 generic.go:334] "Generic (PLEG): container finished" podID="ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7" containerID="f675ec924d4cd27e7a51482eb199ecf25f7b77cc0ef6170cda6f42a6dc91fb1f" exitCode=0
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.357146 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" event={"ID":"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7","Type":"ContainerDied","Data":"f675ec924d4cd27e7a51482eb199ecf25f7b77cc0ef6170cda6f42a6dc91fb1f"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.357170 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" event={"ID":"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7","Type":"ContainerStarted","Data":"a7c65164cfd3f68f1c298aa82c1e33e9d346d58ca27f010c950503a132d01a12"}
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.371177 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd686\" (UniqueName: \"kubernetes.io/projected/d483076b-151b-465f-beec-94e8b65379ef-kube-api-access-qd686\") pod \"openshift-controller-manager-operator-756b6f6bc6-zb8vd\" (UID: \"d483076b-151b-465f-beec-94e8b65379ef\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.381249 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.386904 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vd8gg\" (UniqueName: \"kubernetes.io/projected/d767e519-28a3-4c13-b1a7-ddd63d6b30b8-kube-api-access-vd8gg\") pod \"authentication-operator-69f744f599-c8j2v\" (UID: \"d767e519-28a3-4c13-b1a7-ddd63d6b30b8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.389069 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.415953 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldm65\" (UniqueName: \"kubernetes.io/projected/308d97cc-5fc4-4a28-883b-bb545c57132b-kube-api-access-ldm65\") pod \"catalog-operator-68c6474976-p46tt\" (UID: \"308d97cc-5fc4-4a28-883b-bb545c57132b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.425494 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-2m97z"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.431391 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.434392 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmszt\" (UniqueName: \"kubernetes.io/projected/ffe37073-7f98-4d38-a3a3-7b3e1f3df449-kube-api-access-nmszt\") pod \"etcd-operator-b45778765-rsr75\" (UID: \"ffe37073-7f98-4d38-a3a3-7b3e1f3df449\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.439700 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.455901 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvs4j\" (UniqueName: \"kubernetes.io/projected/cebb48e4-432a-42f0-9e13-7a11ab680535-kube-api-access-pvs4j\") pod \"kube-storage-version-migrator-operator-b67b599dd-phspq\" (UID: \"cebb48e4-432a-42f0-9e13-7a11ab680535\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.474462 4760 request.go:700] Waited for 1.949073381s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/serviceaccounts/cluster-image-registry-operator/token
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.485827 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/212051f7-d251-4005-a25d-ac53d864a70c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4rksk\" (UID: \"212051f7-d251-4005-a25d-ac53d864a70c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.496086 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.509359 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/53abeab9-107f-44fd-84b7-c641f8583fbd-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-9w6q4\" (UID: \"53abeab9-107f-44fd-84b7-c641f8583fbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.515320 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.540997 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.552455 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.575067 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.593105 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.603907 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-qr42v"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.611988 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.612255 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.619685 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zg8fk"]
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.632940 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.652692 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.656195 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.673065 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.674952 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.675043 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-c8j2v"]
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.710335 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk"
Nov 24 17:05:47 crc kubenswrapper[4760]: W1124 17:05:47.717161 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44091a4f_586a_44f5_934d_294bbe4458c0.slice/crio-f84363d016c15a3a7d6517522c4dec51050a5c823c1268fd48c853dc2d8f993f WatchSource:0}: Error finding container f84363d016c15a3a7d6517522c4dec51050a5c823c1268fd48c853dc2d8f993f: Status 404 returned error can't find the container with id f84363d016c15a3a7d6517522c4dec51050a5c823c1268fd48c853dc2d8f993f
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.717548 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq"
Nov 24 17:05:47 crc kubenswrapper[4760]: W1124 17:05:47.727963 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd767e519_28a3_4c13_b1a7_ddd63d6b30b8.slice/crio-cdf5c29688c4ed2925ecb9659d8910d36c46e1a751fba40e92486de66b6f1021 WatchSource:0}: Error finding container cdf5c29688c4ed2925ecb9659d8910d36c46e1a751fba40e92486de66b6f1021: Status 404 returned error can't find the container with id cdf5c29688c4ed2925ecb9659d8910d36c46e1a751fba40e92486de66b6f1021
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.750529 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/28fd1340-959d-4b58-8ad7-c654176844e2-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.750566 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-bound-sa-token\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.750593 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccgqj\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-kube-api-access-ccgqj\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.750613 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/325ba002-fdd6-411d-bd9b-104bc011abd6-config\") pod \"kube-controller-manager-operator-78b949d7b-kpclh\" (UID: \"325ba002-fdd6-411d-bd9b-104bc011abd6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.750635 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-trusted-ca\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.750719 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b96edc58-6fe3-4af4-a252-3e967b42eb40-metrics-tls\") pod \"dns-operator-744455d44c-rcsnv\" (UID: \"b96edc58-6fe3-4af4-a252-3e967b42eb40\") " pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.750799 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dd612234-f5b5-494b-b405-878b48935d15-etcd-client\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.750868 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd612234-f5b5-494b-b405-878b48935d15-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.750930 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-registry-tls\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.750957 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/dd612234-f5b5-494b-b405-878b48935d15-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.751047 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/28fd1340-959d-4b58-8ad7-c654176844e2-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.751104 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7spnr\" (UniqueName: \"kubernetes.io/projected/b96edc58-6fe3-4af4-a252-3e967b42eb40-kube-api-access-7spnr\") pod \"dns-operator-744455d44c-rcsnv\" (UID: \"b96edc58-6fe3-4af4-a252-3e967b42eb40\") " pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.751186 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/325ba002-fdd6-411d-bd9b-104bc011abd6-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kpclh\" (UID: \"325ba002-fdd6-411d-bd9b-104bc011abd6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.751251 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xx4m2\" (UniqueName: \"kubernetes.io/projected/1821fc95-952c-44e2-9d50-5458327620e9-kube-api-access-xx4m2\") pod \"downloads-7954f5f757-2stcx\" (UID: \"1821fc95-952c-44e2-9d50-5458327620e9\") " pod="openshift-console/downloads-7954f5f757-2stcx"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.751339 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.751428 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwbzc\" (UniqueName: \"kubernetes.io/projected/dd612234-f5b5-494b-b405-878b48935d15-kube-api-access-jwbzc\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.751498 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd612234-f5b5-494b-b405-878b48935d15-audit-dir\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.751564 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/48331584-ccc8-4953-ab9d-738087c5f55b-trusted-ca\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.751603 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-registry-certificates\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: E1124 17:05:47.751726 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:48.251709993 +0000 UTC m=+143.574591663 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.752077 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd612234-f5b5-494b-b405-878b48935d15-serving-cert\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.752145 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp8nn\" (UniqueName: \"kubernetes.io/projected/7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5-kube-api-access-rp8nn\") pod \"openshift-apiserver-operator-796bbdcf4f-cs6nq\" (UID: \"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.752162 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/dd612234-f5b5-494b-b405-878b48935d15-encryption-config\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.752393 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dd612234-f5b5-494b-b405-878b48935d15-audit-policies\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.752419 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48331584-ccc8-4953-ab9d-738087c5f55b-serving-cert\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.752435 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtsdv\" (UniqueName: \"kubernetes.io/projected/48331584-ccc8-4953-ab9d-738087c5f55b-kube-api-access-gtsdv\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.752473 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48331584-ccc8-4953-ab9d-738087c5f55b-config\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.752508 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-cs6nq\" (UID: \"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.752560 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/325ba002-fdd6-411d-bd9b-104bc011abd6-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kpclh\" (UID: \"325ba002-fdd6-411d-bd9b-104bc011abd6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.752602 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-cs6nq\" (UID: \"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855546 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855782 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c2682b9f-d828-4b3b-958e-3ee8dfb4a090-profile-collector-cert\") pod \"olm-operator-6b444d44fb-87zlw\" (UID: \"c2682b9f-d828-4b3b-958e-3ee8dfb4a090\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855804 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/dd612234-f5b5-494b-b405-878b48935d15-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855860 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-mountpoint-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855876 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/28fd1340-959d-4b58-8ad7-c654176844e2-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855893 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7spnr\" (UniqueName: \"kubernetes.io/projected/b96edc58-6fe3-4af4-a252-3e967b42eb40-kube-api-access-7spnr\") pod \"dns-operator-744455d44c-rcsnv\" (UID: \"b96edc58-6fe3-4af4-a252-3e967b42eb40\") " pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855921 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46k5t\" (UniqueName: \"kubernetes.io/projected/7d65d0f7-afc5-4170-9ea8-edabc0e0cf33-kube-api-access-46k5t\") pod \"multus-admission-controller-857f4d67dd-bqtxg\" (UID: \"7d65d0f7-afc5-4170-9ea8-edabc0e0cf33\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855937 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cacf6995-d78c-4d35-a289-4ed9f982becd-metrics-tls\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855952 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5p78\" (UniqueName: \"kubernetes.io/projected/09c5bc40-1e38-424d-935a-456542a5e818-kube-api-access-f5p78\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855969 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/30e61e44-e06d-4ca6-a943-a0a595acb393-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h7w7t\" (UID: \"30e61e44-e06d-4ca6-a943-a0a595acb393\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.855987 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xx4m2\" (UniqueName: \"kubernetes.io/projected/1821fc95-952c-44e2-9d50-5458327620e9-kube-api-access-xx4m2\") pod \"downloads-7954f5f757-2stcx\" (UID: \"1821fc95-952c-44e2-9d50-5458327620e9\") " pod="openshift-console/downloads-7954f5f757-2stcx"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856063 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3586eca-4355-49c8-b3df-f5a50b3b0381-config\") pod \"service-ca-operator-777779d784-smj94\" (UID: \"f3586eca-4355-49c8-b3df-f5a50b3b0381\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856101 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/325ba002-fdd6-411d-bd9b-104bc011abd6-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kpclh\" (UID: \"325ba002-fdd6-411d-bd9b-104bc011abd6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856142 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/893b8ae8-4ab4-474e-b6bc-ed926c279c44-config-volume\") pod \"collect-profiles-29400060-h5bsv\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856157 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/880a13c1-2abe-4aa8-9124-2ab04374e740-images\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856183 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7eb5f696-cfab-4dd3-813b-8b4c389aa6c3-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-pqtx7\" (UID: \"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856221 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/09c5bc40-1e38-424d-935a-456542a5e818-tmpfs\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856253 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tclnh\" (UniqueName: \"kubernetes.io/projected/880a13c1-2abe-4aa8-9124-2ab04374e740-kube-api-access-tclnh\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856287 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrlwm\" (UniqueName: \"kubernetes.io/projected/893b8ae8-4ab4-474e-b6bc-ed926c279c44-kube-api-access-xrlwm\") pod \"collect-profiles-29400060-h5bsv\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856322 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp8nn\" (UniqueName: \"kubernetes.io/projected/7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5-kube-api-access-rp8nn\") pod \"openshift-apiserver-operator-796bbdcf4f-cs6nq\" (UID: \"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856338 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/dd612234-f5b5-494b-b405-878b48935d15-encryption-config\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856352 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/893b8ae8-4ab4-474e-b6bc-ed926c279c44-secret-volume\") pod \"collect-profiles-29400060-h5bsv\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856389 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/09c5bc40-1e38-424d-935a-456542a5e818-apiservice-cert\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856412 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-registration-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856436 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cacf6995-d78c-4d35-a289-4ed9f982becd-trusted-ca\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856453 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/73b081c8-15c0-40af-a11b-0a381bde2e72-proxy-tls\") pod \"machine-config-controller-84d6567774-c8zjd\" (UID: \"73b081c8-15c0-40af-a11b-0a381bde2e72\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856467 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-socket-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856484 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/da7e7bd6-cc1b-46a5-b80b-1671226fda49-node-bootstrap-token\") pod \"machine-config-server-2gzg7\" (UID: \"da7e7bd6-cc1b-46a5-b80b-1671226fda49\") " pod="openshift-machine-config-operator/machine-config-server-2gzg7"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856521 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wwglm\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " pod="openshift-marketplace/marketplace-operator-79b997595-wwglm"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856536 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfvgx\" (UniqueName: \"kubernetes.io/projected/f49183f5-e6c6-4938-b02f-de17d2d16ecc-kube-api-access-pfvgx\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856551 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-cs6nq\" (UID: \"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856567 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7hh9\" (UniqueName: \"kubernetes.io/projected/f3586eca-4355-49c8-b3df-f5a50b3b0381-kube-api-access-z7hh9\") pod \"service-ca-operator-777779d784-smj94\" (UID: \"f3586eca-4355-49c8-b3df-f5a50b3b0381\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856641 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/09c5bc40-1e38-424d-935a-456542a5e818-webhook-cert\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856655 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/da7e7bd6-cc1b-46a5-b80b-1671226fda49-certs\") pod \"machine-config-server-2gzg7\" (UID: \"da7e7bd6-cc1b-46a5-b80b-1671226fda49\") " pod="openshift-machine-config-operator/machine-config-server-2gzg7"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856669 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-plugins-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856694 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/28fd1340-959d-4b58-8ad7-c654176844e2-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856710 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-bound-sa-token\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856725 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfjqm\" (UniqueName: \"kubernetes.io/projected/0f2b5450-9c1d-4491-85f7-006cd0647f92-kube-api-access-mfjqm\") pod \"ingress-canary-rp7d8\" (UID: \"0f2b5450-9c1d-4491-85f7-006cd0647f92\") 
" pod="openshift-ingress-canary/ingress-canary-rp7d8" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856741 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-trusted-ca\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856775 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b96edc58-6fe3-4af4-a252-3e967b42eb40-metrics-tls\") pod \"dns-operator-744455d44c-rcsnv\" (UID: \"b96edc58-6fe3-4af4-a252-3e967b42eb40\") " pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856790 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7r46\" (UniqueName: \"kubernetes.io/projected/c2682b9f-d828-4b3b-958e-3ee8dfb4a090-kube-api-access-b7r46\") pod \"olm-operator-6b444d44fb-87zlw\" (UID: \"c2682b9f-d828-4b3b-958e-3ee8dfb4a090\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856816 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0f2b5450-9c1d-4491-85f7-006cd0647f92-cert\") pod \"ingress-canary-rp7d8\" (UID: \"0f2b5450-9c1d-4491-85f7-006cd0647f92\") " pod="openshift-ingress-canary/ingress-canary-rp7d8" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856841 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dd612234-f5b5-494b-b405-878b48935d15-etcd-client\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856857 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd612234-f5b5-494b-b405-878b48935d15-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856872 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d65d0f7-afc5-4170-9ea8-edabc0e0cf33-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-bqtxg\" (UID: \"7d65d0f7-afc5-4170-9ea8-edabc0e0cf33\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856907 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dk6tf\" (UniqueName: \"kubernetes.io/projected/73b081c8-15c0-40af-a11b-0a381bde2e72-kube-api-access-dk6tf\") pod \"machine-config-controller-84d6567774-c8zjd\" (UID: \"73b081c8-15c0-40af-a11b-0a381bde2e72\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856924 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-wk448\" (UniqueName: \"kubernetes.io/projected/e2937254-b481-4958-8891-c0b9c2b85983-kube-api-access-wk448\") pod \"migrator-59844c95c7-v8wjb\" (UID: \"e2937254-b481-4958-8891-c0b9c2b85983\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856950 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-registry-tls\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856965 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7eb5f696-cfab-4dd3-813b-8b4c389aa6c3-config\") pod \"kube-apiserver-operator-766d6c64bb-pqtx7\" (UID: \"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.856990 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d874525c-7914-4f51-a51e-45cfc97e0fe0-config-volume\") pod \"dns-default-bkvpl\" (UID: \"d874525c-7914-4f51-a51e-45cfc97e0fe0\") " pod="openshift-dns/dns-default-bkvpl" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857019 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/880a13c1-2abe-4aa8-9124-2ab04374e740-auth-proxy-config\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857035 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdth9\" (UniqueName: \"kubernetes.io/projected/30e61e44-e06d-4ca6-a943-a0a595acb393-kube-api-access-cdth9\") pod \"package-server-manager-789f6589d5-h7w7t\" (UID: \"30e61e44-e06d-4ca6-a943-a0a595acb393\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857089 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73b081c8-15c0-40af-a11b-0a381bde2e72-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-c8zjd\" (UID: \"73b081c8-15c0-40af-a11b-0a381bde2e72\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857118 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwbzc\" (UniqueName: \"kubernetes.io/projected/dd612234-f5b5-494b-b405-878b48935d15-kube-api-access-jwbzc\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: E1124 17:05:47.857190 4760 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:48.357158772 +0000 UTC m=+143.680040332 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857260 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd612234-f5b5-494b-b405-878b48935d15-audit-dir\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857339 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a67e9663-0794-412b-b976-c0c50f39184e-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-sldtw\" (UID: \"a67e9663-0794-412b-b976-c0c50f39184e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857383 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c2682b9f-d828-4b3b-958e-3ee8dfb4a090-srv-cert\") pod \"olm-operator-6b444d44fb-87zlw\" (UID: \"c2682b9f-d828-4b3b-958e-3ee8dfb4a090\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857407 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7eb5f696-cfab-4dd3-813b-8b4c389aa6c3-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-pqtx7\" (UID: \"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857433 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/48331584-ccc8-4953-ab9d-738087c5f55b-trusted-ca\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857458 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24zsj\" (UniqueName: \"kubernetes.io/projected/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-kube-api-access-24zsj\") pod \"marketplace-operator-79b997595-wwglm\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857488 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-registry-certificates\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857511 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd612234-f5b5-494b-b405-878b48935d15-serving-cert\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857536 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dd612234-f5b5-494b-b405-878b48935d15-audit-policies\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857590 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jzq2\" (UniqueName: \"kubernetes.io/projected/cacf6995-d78c-4d35-a289-4ed9f982becd-kube-api-access-4jzq2\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857621 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wwglm\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857644 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48331584-ccc8-4953-ab9d-738087c5f55b-serving-cert\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857667 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtsdv\" (UniqueName: \"kubernetes.io/projected/48331584-ccc8-4953-ab9d-738087c5f55b-kube-api-access-gtsdv\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857725 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48331584-ccc8-4953-ab9d-738087c5f55b-config\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857751 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l9jg\" (UniqueName: \"kubernetes.io/projected/b07b23cd-015a-4115-b960-a9d9687dc74b-kube-api-access-9l9jg\") pod \"service-ca-9c57cc56f-cxr8b\" (UID: 
\"b07b23cd-015a-4115-b960-a9d9687dc74b\") " pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857775 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-cs6nq\" (UID: \"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857796 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b07b23cd-015a-4115-b960-a9d9687dc74b-signing-key\") pod \"service-ca-9c57cc56f-cxr8b\" (UID: \"b07b23cd-015a-4115-b960-a9d9687dc74b\") " pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.857819 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/880a13c1-2abe-4aa8-9124-2ab04374e740-proxy-tls\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858177 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/28fd1340-959d-4b58-8ad7-c654176844e2-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858338 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3586eca-4355-49c8-b3df-f5a50b3b0381-serving-cert\") pod \"service-ca-operator-777779d784-smj94\" (UID: \"f3586eca-4355-49c8-b3df-f5a50b3b0381\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858419 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cacf6995-d78c-4d35-a289-4ed9f982becd-bound-sa-token\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858470 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b07b23cd-015a-4115-b960-a9d9687dc74b-signing-cabundle\") pod \"service-ca-9c57cc56f-cxr8b\" (UID: \"b07b23cd-015a-4115-b960-a9d9687dc74b\") " pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858515 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/325ba002-fdd6-411d-bd9b-104bc011abd6-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kpclh\" (UID: \"325ba002-fdd6-411d-bd9b-104bc011abd6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh" Nov 24 17:05:47 crc 
kubenswrapper[4760]: I1124 17:05:47.858569 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8xq5\" (UniqueName: \"kubernetes.io/projected/d874525c-7914-4f51-a51e-45cfc97e0fe0-kube-api-access-z8xq5\") pod \"dns-default-bkvpl\" (UID: \"d874525c-7914-4f51-a51e-45cfc97e0fe0\") " pod="openshift-dns/dns-default-bkvpl" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858594 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-csi-data-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858641 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccgqj\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-kube-api-access-ccgqj\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858664 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/325ba002-fdd6-411d-bd9b-104bc011abd6-config\") pod \"kube-controller-manager-operator-78b949d7b-kpclh\" (UID: \"325ba002-fdd6-411d-bd9b-104bc011abd6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858686 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-799zt\" (UniqueName: \"kubernetes.io/projected/da7e7bd6-cc1b-46a5-b80b-1671226fda49-kube-api-access-799zt\") pod \"machine-config-server-2gzg7\" (UID: \"da7e7bd6-cc1b-46a5-b80b-1671226fda49\") " pod="openshift-machine-config-operator/machine-config-server-2gzg7" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858709 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d874525c-7914-4f51-a51e-45cfc97e0fe0-metrics-tls\") pod \"dns-default-bkvpl\" (UID: \"d874525c-7914-4f51-a51e-45cfc97e0fe0\") " pod="openshift-dns/dns-default-bkvpl" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.858828 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq2z9\" (UniqueName: \"kubernetes.io/projected/a67e9663-0794-412b-b976-c0c50f39184e-kube-api-access-xq2z9\") pod \"control-plane-machine-set-operator-78cbb6b69f-sldtw\" (UID: \"a67e9663-0794-412b-b976-c0c50f39184e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.861448 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48331584-ccc8-4953-ab9d-738087c5f55b-config\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.862412 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/dd612234-f5b5-494b-b405-878b48935d15-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.863699 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/48331584-ccc8-4953-ab9d-738087c5f55b-trusted-ca\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.868188 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd612234-f5b5-494b-b405-878b48935d15-audit-dir\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.871700 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-registry-tls\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.871994 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-trusted-ca\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.873154 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-registry-certificates\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.873221 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/28fd1340-959d-4b58-8ad7-c654176844e2-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.873844 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dd612234-f5b5-494b-b405-878b48935d15-audit-policies\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.874053 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/325ba002-fdd6-411d-bd9b-104bc011abd6-config\") pod \"kube-controller-manager-operator-78b949d7b-kpclh\" (UID: \"325ba002-fdd6-411d-bd9b-104bc011abd6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.876140 4760 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/dd612234-f5b5-494b-b405-878b48935d15-encryption-config\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.876152 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-cs6nq\" (UID: \"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.876766 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd612234-f5b5-494b-b405-878b48935d15-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.877395 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-cs6nq\" (UID: \"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.877670 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48331584-ccc8-4953-ab9d-738087c5f55b-serving-cert\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.880075 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dd612234-f5b5-494b-b405-878b48935d15-etcd-client\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.880357 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/325ba002-fdd6-411d-bd9b-104bc011abd6-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kpclh\" (UID: \"325ba002-fdd6-411d-bd9b-104bc011abd6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.881211 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd612234-f5b5-494b-b405-878b48935d15-serving-cert\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.891242 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b96edc58-6fe3-4af4-a252-3e967b42eb40-metrics-tls\") pod \"dns-operator-744455d44c-rcsnv\" (UID: \"b96edc58-6fe3-4af4-a252-3e967b42eb40\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.905681 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwbzc\" (UniqueName: \"kubernetes.io/projected/dd612234-f5b5-494b-b405-878b48935d15-kube-api-access-jwbzc\") pod \"apiserver-7bbb656c7d-xlpph\" (UID: \"dd612234-f5b5-494b-b405-878b48935d15\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.932854 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt"] Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.933209 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtsdv\" (UniqueName: \"kubernetes.io/projected/48331584-ccc8-4953-ab9d-738087c5f55b-kube-api-access-gtsdv\") pod \"console-operator-58897d9998-29k4r\" (UID: \"48331584-ccc8-4953-ab9d-738087c5f55b\") " pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.934885 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-bound-sa-token\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.941301 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-rsr75"] Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.946448 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/325ba002-fdd6-411d-bd9b-104bc011abd6-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kpclh\" (UID: \"325ba002-fdd6-411d-bd9b-104bc011abd6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961340 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c2682b9f-d828-4b3b-958e-3ee8dfb4a090-profile-collector-cert\") pod \"olm-operator-6b444d44fb-87zlw\" (UID: \"c2682b9f-d828-4b3b-958e-3ee8dfb4a090\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961378 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-mountpoint-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961405 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46k5t\" (UniqueName: \"kubernetes.io/projected/7d65d0f7-afc5-4170-9ea8-edabc0e0cf33-kube-api-access-46k5t\") pod \"multus-admission-controller-857f4d67dd-bqtxg\" (UID: \"7d65d0f7-afc5-4170-9ea8-edabc0e0cf33\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961424 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/30e61e44-e06d-4ca6-a943-a0a595acb393-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h7w7t\" (UID: \"30e61e44-e06d-4ca6-a943-a0a595acb393\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961441 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cacf6995-d78c-4d35-a289-4ed9f982becd-metrics-tls\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961455 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5p78\" (UniqueName: \"kubernetes.io/projected/09c5bc40-1e38-424d-935a-456542a5e818-kube-api-access-f5p78\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961479 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3586eca-4355-49c8-b3df-f5a50b3b0381-config\") pod \"service-ca-operator-777779d784-smj94\" (UID: \"f3586eca-4355-49c8-b3df-f5a50b3b0381\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961503 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/893b8ae8-4ab4-474e-b6bc-ed926c279c44-config-volume\") pod \"collect-profiles-29400060-h5bsv\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961520 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/880a13c1-2abe-4aa8-9124-2ab04374e740-images\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961538 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961554 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7eb5f696-cfab-4dd3-813b-8b4c389aa6c3-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-pqtx7\" (UID: \"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961579 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/09c5bc40-1e38-424d-935a-456542a5e818-tmpfs\") 
pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961595 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tclnh\" (UniqueName: \"kubernetes.io/projected/880a13c1-2abe-4aa8-9124-2ab04374e740-kube-api-access-tclnh\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961613 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrlwm\" (UniqueName: \"kubernetes.io/projected/893b8ae8-4ab4-474e-b6bc-ed926c279c44-kube-api-access-xrlwm\") pod \"collect-profiles-29400060-h5bsv\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961634 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/893b8ae8-4ab4-474e-b6bc-ed926c279c44-secret-volume\") pod \"collect-profiles-29400060-h5bsv\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961656 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/09c5bc40-1e38-424d-935a-456542a5e818-apiservice-cert\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961669 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-registration-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961690 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/73b081c8-15c0-40af-a11b-0a381bde2e72-proxy-tls\") pod \"machine-config-controller-84d6567774-c8zjd\" (UID: \"73b081c8-15c0-40af-a11b-0a381bde2e72\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961704 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cacf6995-d78c-4d35-a289-4ed9f982becd-trusted-ca\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961717 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-socket-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 
17:05:47.961731 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/da7e7bd6-cc1b-46a5-b80b-1671226fda49-node-bootstrap-token\") pod \"machine-config-server-2gzg7\" (UID: \"da7e7bd6-cc1b-46a5-b80b-1671226fda49\") " pod="openshift-machine-config-operator/machine-config-server-2gzg7" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961748 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wwglm\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961766 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfvgx\" (UniqueName: \"kubernetes.io/projected/f49183f5-e6c6-4938-b02f-de17d2d16ecc-kube-api-access-pfvgx\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961789 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7hh9\" (UniqueName: \"kubernetes.io/projected/f3586eca-4355-49c8-b3df-f5a50b3b0381-kube-api-access-z7hh9\") pod \"service-ca-operator-777779d784-smj94\" (UID: \"f3586eca-4355-49c8-b3df-f5a50b3b0381\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961833 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/09c5bc40-1e38-424d-935a-456542a5e818-webhook-cert\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961861 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfjqm\" (UniqueName: \"kubernetes.io/projected/0f2b5450-9c1d-4491-85f7-006cd0647f92-kube-api-access-mfjqm\") pod \"ingress-canary-rp7d8\" (UID: \"0f2b5450-9c1d-4491-85f7-006cd0647f92\") " pod="openshift-ingress-canary/ingress-canary-rp7d8" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961880 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/da7e7bd6-cc1b-46a5-b80b-1671226fda49-certs\") pod \"machine-config-server-2gzg7\" (UID: \"da7e7bd6-cc1b-46a5-b80b-1671226fda49\") " pod="openshift-machine-config-operator/machine-config-server-2gzg7" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961901 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-plugins-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961925 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0f2b5450-9c1d-4491-85f7-006cd0647f92-cert\") pod \"ingress-canary-rp7d8\" (UID: \"0f2b5450-9c1d-4491-85f7-006cd0647f92\") " 
pod="openshift-ingress-canary/ingress-canary-rp7d8" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961943 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7r46\" (UniqueName: \"kubernetes.io/projected/c2682b9f-d828-4b3b-958e-3ee8dfb4a090-kube-api-access-b7r46\") pod \"olm-operator-6b444d44fb-87zlw\" (UID: \"c2682b9f-d828-4b3b-958e-3ee8dfb4a090\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961960 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d65d0f7-afc5-4170-9ea8-edabc0e0cf33-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-bqtxg\" (UID: \"7d65d0f7-afc5-4170-9ea8-edabc0e0cf33\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961977 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dk6tf\" (UniqueName: \"kubernetes.io/projected/73b081c8-15c0-40af-a11b-0a381bde2e72-kube-api-access-dk6tf\") pod \"machine-config-controller-84d6567774-c8zjd\" (UID: \"73b081c8-15c0-40af-a11b-0a381bde2e72\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.961993 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk448\" (UniqueName: \"kubernetes.io/projected/e2937254-b481-4958-8891-c0b9c2b85983-kube-api-access-wk448\") pod \"migrator-59844c95c7-v8wjb\" (UID: \"e2937254-b481-4958-8891-c0b9c2b85983\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962029 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7eb5f696-cfab-4dd3-813b-8b4c389aa6c3-config\") pod \"kube-apiserver-operator-766d6c64bb-pqtx7\" (UID: \"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962053 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdth9\" (UniqueName: \"kubernetes.io/projected/30e61e44-e06d-4ca6-a943-a0a595acb393-kube-api-access-cdth9\") pod \"package-server-manager-789f6589d5-h7w7t\" (UID: \"30e61e44-e06d-4ca6-a943-a0a595acb393\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962070 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d874525c-7914-4f51-a51e-45cfc97e0fe0-config-volume\") pod \"dns-default-bkvpl\" (UID: \"d874525c-7914-4f51-a51e-45cfc97e0fe0\") " pod="openshift-dns/dns-default-bkvpl" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962084 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/880a13c1-2abe-4aa8-9124-2ab04374e740-auth-proxy-config\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r" Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962099 4760 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73b081c8-15c0-40af-a11b-0a381bde2e72-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-c8zjd\" (UID: \"73b081c8-15c0-40af-a11b-0a381bde2e72\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962121 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a67e9663-0794-412b-b976-c0c50f39184e-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-sldtw\" (UID: \"a67e9663-0794-412b-b976-c0c50f39184e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962136 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c2682b9f-d828-4b3b-958e-3ee8dfb4a090-srv-cert\") pod \"olm-operator-6b444d44fb-87zlw\" (UID: \"c2682b9f-d828-4b3b-958e-3ee8dfb4a090\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962151 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7eb5f696-cfab-4dd3-813b-8b4c389aa6c3-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-pqtx7\" (UID: \"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962167 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24zsj\" (UniqueName: \"kubernetes.io/projected/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-kube-api-access-24zsj\") pod \"marketplace-operator-79b997595-wwglm\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " pod="openshift-marketplace/marketplace-operator-79b997595-wwglm"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962184 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jzq2\" (UniqueName: \"kubernetes.io/projected/cacf6995-d78c-4d35-a289-4ed9f982becd-kube-api-access-4jzq2\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962213 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wwglm\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " pod="openshift-marketplace/marketplace-operator-79b997595-wwglm"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962230 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l9jg\" (UniqueName: \"kubernetes.io/projected/b07b23cd-015a-4115-b960-a9d9687dc74b-kube-api-access-9l9jg\") pod \"service-ca-9c57cc56f-cxr8b\" (UID: \"b07b23cd-015a-4115-b960-a9d9687dc74b\") " pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962245 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b07b23cd-015a-4115-b960-a9d9687dc74b-signing-key\") pod \"service-ca-9c57cc56f-cxr8b\" (UID: \"b07b23cd-015a-4115-b960-a9d9687dc74b\") " pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962261 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/880a13c1-2abe-4aa8-9124-2ab04374e740-proxy-tls\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962283 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cacf6995-d78c-4d35-a289-4ed9f982becd-bound-sa-token\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962297 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3586eca-4355-49c8-b3df-f5a50b3b0381-serving-cert\") pod \"service-ca-operator-777779d784-smj94\" (UID: \"f3586eca-4355-49c8-b3df-f5a50b3b0381\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962312 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b07b23cd-015a-4115-b960-a9d9687dc74b-signing-cabundle\") pod \"service-ca-9c57cc56f-cxr8b\" (UID: \"b07b23cd-015a-4115-b960-a9d9687dc74b\") " pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962328 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8xq5\" (UniqueName: \"kubernetes.io/projected/d874525c-7914-4f51-a51e-45cfc97e0fe0-kube-api-access-z8xq5\") pod \"dns-default-bkvpl\" (UID: \"d874525c-7914-4f51-a51e-45cfc97e0fe0\") " pod="openshift-dns/dns-default-bkvpl"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962342 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-csi-data-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962357 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-799zt\" (UniqueName: \"kubernetes.io/projected/da7e7bd6-cc1b-46a5-b80b-1671226fda49-kube-api-access-799zt\") pod \"machine-config-server-2gzg7\" (UID: \"da7e7bd6-cc1b-46a5-b80b-1671226fda49\") " pod="openshift-machine-config-operator/machine-config-server-2gzg7"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962378 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d874525c-7914-4f51-a51e-45cfc97e0fe0-metrics-tls\") pod \"dns-default-bkvpl\" (UID: \"d874525c-7914-4f51-a51e-45cfc97e0fe0\") " pod="openshift-dns/dns-default-bkvpl"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.962394 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq2z9\" (UniqueName: \"kubernetes.io/projected/a67e9663-0794-412b-b976-c0c50f39184e-kube-api-access-xq2z9\") pod \"control-plane-machine-set-operator-78cbb6b69f-sldtw\" (UID: \"a67e9663-0794-412b-b976-c0c50f39184e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw"
Nov 24 17:05:47 crc kubenswrapper[4760]: E1124 17:05:47.964067 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:48.464047443 +0000 UTC m=+143.786929043 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.964326 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3586eca-4355-49c8-b3df-f5a50b3b0381-config\") pod \"service-ca-operator-777779d784-smj94\" (UID: \"f3586eca-4355-49c8-b3df-f5a50b3b0381\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.964569 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/880a13c1-2abe-4aa8-9124-2ab04374e740-images\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.965242 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/893b8ae8-4ab4-474e-b6bc-ed926c279c44-config-volume\") pod \"collect-profiles-29400060-h5bsv\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.965750 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/09c5bc40-1e38-424d-935a-456542a5e818-tmpfs\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.966115 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-registration-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.966169 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-mountpoint-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.966192 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/893b8ae8-4ab4-474e-b6bc-ed926c279c44-secret-volume\") pod \"collect-profiles-29400060-h5bsv\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.966627 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d874525c-7914-4f51-a51e-45cfc97e0fe0-config-volume\") pod \"dns-default-bkvpl\" (UID: \"d874525c-7914-4f51-a51e-45cfc97e0fe0\") " pod="openshift-dns/dns-default-bkvpl"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.966863 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-csi-data-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.969726 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7eb5f696-cfab-4dd3-813b-8b4c389aa6c3-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-pqtx7\" (UID: \"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.975031 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wwglm\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " pod="openshift-marketplace/marketplace-operator-79b997595-wwglm"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.976211 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3586eca-4355-49c8-b3df-f5a50b3b0381-serving-cert\") pod \"service-ca-operator-777779d784-smj94\" (UID: \"f3586eca-4355-49c8-b3df-f5a50b3b0381\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.976414 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.978380 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/09c5bc40-1e38-424d-935a-456542a5e818-apiservice-cert\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:47 crc kubenswrapper[4760]: W1124 17:05:47.979426 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffe37073_7f98_4d38_a3a3_7b3e1f3df449.slice/crio-a49759c8e10acd4789664696fbe4dc246d95656a91e31df8c93a385a914e88d3 WatchSource:0}: Error finding container a49759c8e10acd4789664696fbe4dc246d95656a91e31df8c93a385a914e88d3: Status 404 returned error can't find the container with id a49759c8e10acd4789664696fbe4dc246d95656a91e31df8c93a385a914e88d3
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.979902 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c2682b9f-d828-4b3b-958e-3ee8dfb4a090-srv-cert\") pod \"olm-operator-6b444d44fb-87zlw\" (UID: \"c2682b9f-d828-4b3b-958e-3ee8dfb4a090\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.980747 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a67e9663-0794-412b-b976-c0c50f39184e-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-sldtw\" (UID: \"a67e9663-0794-412b-b976-c0c50f39184e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.980897 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-socket-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.982701 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wwglm\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " pod="openshift-marketplace/marketplace-operator-79b997595-wwglm"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.989423 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f49183f5-e6c6-4938-b02f-de17d2d16ecc-plugins-dir\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.966121 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c2682b9f-d828-4b3b-958e-3ee8dfb4a090-profile-collector-cert\") pod \"olm-operator-6b444d44fb-87zlw\" (UID: \"c2682b9f-d828-4b3b-958e-3ee8dfb4a090\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.990319 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d874525c-7914-4f51-a51e-45cfc97e0fe0-metrics-tls\") pod \"dns-default-bkvpl\" (UID: \"d874525c-7914-4f51-a51e-45cfc97e0fe0\") " pod="openshift-dns/dns-default-bkvpl"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.990607 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7eb5f696-cfab-4dd3-813b-8b4c389aa6c3-config\") pod \"kube-apiserver-operator-766d6c64bb-pqtx7\" (UID: \"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.990670 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/da7e7bd6-cc1b-46a5-b80b-1671226fda49-certs\") pod \"machine-config-server-2gzg7\" (UID: \"da7e7bd6-cc1b-46a5-b80b-1671226fda49\") " pod="openshift-machine-config-operator/machine-config-server-2gzg7"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.996685 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/73b081c8-15c0-40af-a11b-0a381bde2e72-proxy-tls\") pod \"machine-config-controller-84d6567774-c8zjd\" (UID: \"73b081c8-15c0-40af-a11b-0a381bde2e72\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.996852 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.997664 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b07b23cd-015a-4115-b960-a9d9687dc74b-signing-cabundle\") pod \"service-ca-9c57cc56f-cxr8b\" (UID: \"b07b23cd-015a-4115-b960-a9d9687dc74b\") " pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b"
Nov 24 17:05:47 crc kubenswrapper[4760]: I1124 17:05:47.997681 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b07b23cd-015a-4115-b960-a9d9687dc74b-signing-key\") pod \"service-ca-9c57cc56f-cxr8b\" (UID: \"b07b23cd-015a-4115-b960-a9d9687dc74b\") " pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.001556 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73b081c8-15c0-40af-a11b-0a381bde2e72-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-c8zjd\" (UID: \"73b081c8-15c0-40af-a11b-0a381bde2e72\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.002318 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0f2b5450-9c1d-4491-85f7-006cd0647f92-cert\") pod \"ingress-canary-rp7d8\" (UID: \"0f2b5450-9c1d-4491-85f7-006cd0647f92\") " pod="openshift-ingress-canary/ingress-canary-rp7d8"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.005326 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/880a13c1-2abe-4aa8-9124-2ab04374e740-auth-proxy-config\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.006678 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cacf6995-d78c-4d35-a289-4ed9f982becd-metrics-tls\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.006735 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cacf6995-d78c-4d35-a289-4ed9f982becd-trusted-ca\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.007045 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/09c5bc40-1e38-424d-935a-456542a5e818-webhook-cert\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.008591 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/da7e7bd6-cc1b-46a5-b80b-1671226fda49-node-bootstrap-token\") pod \"machine-config-server-2gzg7\" (UID: \"da7e7bd6-cc1b-46a5-b80b-1671226fda49\") " pod="openshift-machine-config-operator/machine-config-server-2gzg7"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.009141 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/30e61e44-e06d-4ca6-a943-a0a595acb393-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h7w7t\" (UID: \"30e61e44-e06d-4ca6-a943-a0a595acb393\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.010591 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d65d0f7-afc5-4170-9ea8-edabc0e0cf33-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-bqtxg\" (UID: \"7d65d0f7-afc5-4170-9ea8-edabc0e0cf33\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.012200 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp8nn\" (UniqueName: \"kubernetes.io/projected/7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5-kube-api-access-rp8nn\") pod \"openshift-apiserver-operator-796bbdcf4f-cs6nq\" (UID: \"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.013494 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/880a13c1-2abe-4aa8-9124-2ab04374e740-proxy-tls\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.013506 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7spnr\" (UniqueName: \"kubernetes.io/projected/b96edc58-6fe3-4af4-a252-3e967b42eb40-kube-api-access-7spnr\") pod \"dns-operator-744455d44c-rcsnv\" (UID: \"b96edc58-6fe3-4af4-a252-3e967b42eb40\") " pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.029880 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccgqj\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-kube-api-access-ccgqj\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.060530 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xx4m2\" (UniqueName: \"kubernetes.io/projected/1821fc95-952c-44e2-9d50-5458327620e9-kube-api-access-xx4m2\") pod \"downloads-7954f5f757-2stcx\" (UID: \"1821fc95-952c-44e2-9d50-5458327620e9\") " pod="openshift-console/downloads-7954f5f757-2stcx"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.064021 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:48 crc kubenswrapper[4760]: E1124 17:05:48.064859 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:48.564578103 +0000 UTC m=+143.887717110 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.090767 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq2z9\" (UniqueName: \"kubernetes.io/projected/a67e9663-0794-412b-b976-c0c50f39184e-kube-api-access-xq2z9\") pod \"control-plane-machine-set-operator-78cbb6b69f-sldtw\" (UID: \"a67e9663-0794-412b-b976-c0c50f39184e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.105638 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.111233 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7eb5f696-cfab-4dd3-813b-8b4c389aa6c3-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-pqtx7\" (UID: \"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.123332 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.134171 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46k5t\" (UniqueName: \"kubernetes.io/projected/7d65d0f7-afc5-4170-9ea8-edabc0e0cf33-kube-api-access-46k5t\") pod \"multus-admission-controller-857f4d67dd-bqtxg\" (UID: \"7d65d0f7-afc5-4170-9ea8-edabc0e0cf33\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.142108 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.153622 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tclnh\" (UniqueName: \"kubernetes.io/projected/880a13c1-2abe-4aa8-9124-2ab04374e740-kube-api-access-tclnh\") pod \"machine-config-operator-74547568cd-n2m2r\" (UID: \"880a13c1-2abe-4aa8-9124-2ab04374e740\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.166356 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:48 crc kubenswrapper[4760]: E1124 17:05:48.166746 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:48.66673475 +0000 UTC m=+143.989616300 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.177458 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-29k4r"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.186310 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrlwm\" (UniqueName: \"kubernetes.io/projected/893b8ae8-4ab4-474e-b6bc-ed926c279c44-kube-api-access-xrlwm\") pod \"collect-profiles-29400060-h5bsv\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.188382 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5p78\" (UniqueName: \"kubernetes.io/projected/09c5bc40-1e38-424d-935a-456542a5e818-kube-api-access-f5p78\") pod \"packageserver-d55dfcdfc-sgkrm\" (UID: \"09c5bc40-1e38-424d-935a-456542a5e818\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.218365 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdth9\" (UniqueName: \"kubernetes.io/projected/30e61e44-e06d-4ca6-a943-a0a595acb393-kube-api-access-cdth9\") pod \"package-server-manager-789f6589d5-h7w7t\" (UID: \"30e61e44-e06d-4ca6-a943-a0a595acb393\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.227080 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-2stcx"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.230718 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l9jg\" (UniqueName: \"kubernetes.io/projected/b07b23cd-015a-4115-b960-a9d9687dc74b-kube-api-access-9l9jg\") pod \"service-ca-9c57cc56f-cxr8b\" (UID: \"b07b23cd-015a-4115-b960-a9d9687dc74b\") " pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.255941 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8xq5\" (UniqueName: \"kubernetes.io/projected/d874525c-7914-4f51-a51e-45cfc97e0fe0-kube-api-access-z8xq5\") pod \"dns-default-bkvpl\" (UID: \"d874525c-7914-4f51-a51e-45cfc97e0fe0\") " pod="openshift-dns/dns-default-bkvpl"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.265089 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.267186 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:48 crc kubenswrapper[4760]: E1124 17:05:48.267654 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:48.76763735 +0000 UTC m=+144.090518910 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.270212 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.279521 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rt9dn"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.280585 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.281799 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.284521 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cacf6995-d78c-4d35-a289-4ed9f982becd-bound-sa-token\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.288924 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24zsj\" (UniqueName: \"kubernetes.io/projected/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-kube-api-access-24zsj\") pod \"marketplace-operator-79b997595-wwglm\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " pod="openshift-marketplace/marketplace-operator-79b997595-wwglm"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.310990 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jzq2\" (UniqueName: \"kubernetes.io/projected/cacf6995-d78c-4d35-a289-4ed9f982becd-kube-api-access-4jzq2\") pod \"ingress-operator-5b745b69d9-md2fv\" (UID: \"cacf6995-d78c-4d35-a289-4ed9f982becd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.316994 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.333772 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-799zt\" (UniqueName: \"kubernetes.io/projected/da7e7bd6-cc1b-46a5-b80b-1671226fda49-kube-api-access-799zt\") pod \"machine-config-server-2gzg7\" (UID: \"da7e7bd6-cc1b-46a5-b80b-1671226fda49\") " pod="openshift-machine-config-operator/machine-config-server-2gzg7"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.345501 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.346315 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.347449 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfvgx\" (UniqueName: \"kubernetes.io/projected/f49183f5-e6c6-4938-b02f-de17d2d16ecc-kube-api-access-pfvgx\") pod \"csi-hostpathplugin-bqz55\" (UID: \"f49183f5-e6c6-4938-b02f-de17d2d16ecc\") " pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.347523 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.349615 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-qr42v"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.353706 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.366024 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.367650 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" event={"ID":"308d97cc-5fc4-4a28-883b-bb545c57132b","Type":"ContainerStarted","Data":"031ae5e1d68a71ac2b8ac8fd054756c4da13e7c52716a83662653e78a0eb9090"}
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.367694 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" event={"ID":"308d97cc-5fc4-4a28-883b-bb545c57132b","Type":"ContainerStarted","Data":"5419291b65de09694abc9a5c7ff94d5441f1e807488a604da1f7d8de11cc4f93"}
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.368801 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:48 crc kubenswrapper[4760]: E1124 17:05:48.369263 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:48.869248901 +0000 UTC m=+144.192130461 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.371966 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-2m97z" event={"ID":"f4e5eb55-04d5-4d78-8c6e-73eb5233c269","Type":"ContainerStarted","Data":"0bdeebaff857980ded92386d9d2ec77f101e7fb50a6cd22323402a702f7d3a6e"}
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.372057 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-2m97z" event={"ID":"f4e5eb55-04d5-4d78-8c6e-73eb5233c269","Type":"ContainerStarted","Data":"c0719fb4cf7016169e143391f2612325ef5e2ba6dc8cffcd6353f25a99847d95"}
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.374753 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.376142 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7hh9\" (UniqueName: \"kubernetes.io/projected/f3586eca-4355-49c8-b3df-f5a50b3b0381-kube-api-access-z7hh9\") pod \"service-ca-operator-777779d784-smj94\" (UID: \"f3586eca-4355-49c8-b3df-f5a50b3b0381\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.379088 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm"
Nov 24 17:05:48 crc kubenswrapper[4760]: W1124 17:05:48.379123 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod212051f7_d251_4005_a25d_ac53d864a70c.slice/crio-d7e3f17565498d11a4f6f86c560b26726b40b2c6a2139098a5c20055ea52f6f7 WatchSource:0}: Error finding container d7e3f17565498d11a4f6f86c560b26726b40b2c6a2139098a5c20055ea52f6f7: Status 404 returned error can't find the container with id d7e3f17565498d11a4f6f86c560b26726b40b2c6a2139098a5c20055ea52f6f7
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.380653 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr" event={"ID":"95f656bb-1008-45d4-a2e2-484e574e7767","Type":"ContainerStarted","Data":"a21abc6df94d108a78d02a10419bd75d30b98724b51e7f62c8e4687b4d7b6860"}
Nov 24 17:05:48 crc kubenswrapper[4760]: W1124 17:05:48.382363 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53abeab9_107f_44fd_84b7_c641f8583fbd.slice/crio-db40d1390f9b27a6a56288502ee6b5f825006088c26151c0fa9248f4f389040f WatchSource:0}: Error finding container db40d1390f9b27a6a56288502ee6b5f825006088c26151c0fa9248f4f389040f: Status 404 returned error can't find the container with id db40d1390f9b27a6a56288502ee6b5f825006088c26151c0fa9248f4f389040f
Nov 24 17:05:48 crc kubenswrapper[4760]: W1124 17:05:48.384351 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod843e455c_4df4_4e25_91f1_456b61889db5.slice/crio-c0ff39e99da7d0d1f20854775560ef1ca990f38ce08ba55dfd988247740f29c9 WatchSource:0}: Error finding container c0ff39e99da7d0d1f20854775560ef1ca990f38ce08ba55dfd988247740f29c9: Status 404 returned error can't find the container with id c0ff39e99da7d0d1f20854775560ef1ca990f38ce08ba55dfd988247740f29c9
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.385547 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.389345 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk448\" (UniqueName: \"kubernetes.io/projected/e2937254-b481-4958-8891-c0b9c2b85983-kube-api-access-wk448\") pod \"migrator-59844c95c7-v8wjb\" (UID: \"e2937254-b481-4958-8891-c0b9c2b85983\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.394870 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.399471 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.413951 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.416801 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7r46\" (UniqueName: \"kubernetes.io/projected/c2682b9f-d828-4b3b-958e-3ee8dfb4a090-kube-api-access-b7r46\") pod \"olm-operator-6b444d44fb-87zlw\" (UID: \"c2682b9f-d828-4b3b-958e-3ee8dfb4a090\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.421463 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" event={"ID":"474f6772-b7de-416d-bf20-9cd6326bfb37","Type":"ContainerStarted","Data":"687f0dc6cdcf4d29e6a4f0ec550596304b10bb3912bc45075060af26a017b6ea"}
Nov 24 17:05:48 crc kubenswrapper[4760]: W1124 17:05:48.430257 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod325ba002_fdd6_411d_bd9b_104bc011abd6.slice/crio-65f4950ce5f4b2edd4c91ee7953e86941d6d7514fb6d72383dc0d5b1099e072a WatchSource:0}: Error finding container 65f4950ce5f4b2edd4c91ee7953e86941d6d7514fb6d72383dc0d5b1099e072a: Status 404 returned error can't find the container with id 65f4950ce5f4b2edd4c91ee7953e86941d6d7514fb6d72383dc0d5b1099e072a
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.430594 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" event={"ID":"d767e519-28a3-4c13-b1a7-ddd63d6b30b8","Type":"ContainerStarted","Data":"7e4e7ede342968751c6a3b38d64706d9c84bbaeec1e2e56906a8399832aae6df"}
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.430631 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" event={"ID":"d767e519-28a3-4c13-b1a7-ddd63d6b30b8","Type":"ContainerStarted","Data":"cdf5c29688c4ed2925ecb9659d8910d36c46e1a751fba40e92486de66b6f1021"}
Nov 24 17:05:48 crc kubenswrapper[4760]: W1124 17:05:48.431573 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e26988e_e709_4bf3_81a3_8a4666e7e0da.slice/crio-e318057935b8a0e81b5a0efa6bb9921722e1215b4d5abc90b1c67081d704b758 WatchSource:0}: Error finding container e318057935b8a0e81b5a0efa6bb9921722e1215b4d5abc90b1c67081d704b758: Status 404 returned error can't find the container with id e318057935b8a0e81b5a0efa6bb9921722e1215b4d5abc90b1c67081d704b758
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.433957 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.437689 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfjqm\" (UniqueName: \"kubernetes.io/projected/0f2b5450-9c1d-4491-85f7-006cd0647f92-kube-api-access-mfjqm\") pod \"ingress-canary-rp7d8\" (UID: \"0f2b5450-9c1d-4491-85f7-006cd0647f92\") " pod="openshift-ingress-canary/ingress-canary-rp7d8"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.449093 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dk6tf\" (UniqueName: \"kubernetes.io/projected/73b081c8-15c0-40af-a11b-0a381bde2e72-kube-api-access-dk6tf\") pod \"machine-config-controller-84d6567774-c8zjd\" (UID: \"73b081c8-15c0-40af-a11b-0a381bde2e72\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.466674 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.469849 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:48 crc kubenswrapper[4760]: E1124 17:05:48.471099 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:48.971077388 +0000 UTC m=+144.293958948 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.474135 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-bqz55"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.482565 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-2gzg7"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.491603 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-bkvpl"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.500327 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rp7d8"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.514608 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" event={"ID":"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3","Type":"ContainerStarted","Data":"be2e8b0f9c694c0a8a0644ed87fd1b97942fefdd70c93c88087b685864c85f29"}
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.537582 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" podStartSLOduration=122.537568644 podStartE2EDuration="2m2.537568644s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:48.536716829 +0000 UTC m=+143.859598379" watchObservedRunningTime="2025-11-24 17:05:48.537568644 +0000 UTC m=+143.860450194"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.541021 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.544922 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" event={"ID":"ad5ccde0-f1f1-4f17-81f4-1d14f7f40ba7","Type":"ContainerStarted","Data":"39de7386be72c0e31abff565454f681771e66dae8454040a054dcdf97b858d86"}
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.545447 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.563976 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" event={"ID":"44091a4f-586a-44f5-934d-294bbe4458c0","Type":"ContainerStarted","Data":"c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c"}
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.564354 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" event={"ID":"44091a4f-586a-44f5-934d-294bbe4458c0","Type":"ContainerStarted","Data":"f84363d016c15a3a7d6517522c4dec51050a5c823c1268fd48c853dc2d8f993f"}
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.572178 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:48 crc kubenswrapper[4760]: E1124 17:05:48.572567 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:49.072537385 +0000 UTC m=+144.395418935 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.585802 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rcsnv"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.588819 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" event={"ID":"ffe37073-7f98-4d38-a3a3-7b3e1f3df449","Type":"ContainerStarted","Data":"a49759c8e10acd4789664696fbe4dc246d95656a91e31df8c93a385a914e88d3"}
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.591623 4760 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-xxkwx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.591679 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" podUID="d5dec400-42dd-4869-a1eb-233e55cc120f" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.592150 4760 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-bmf26 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body=
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.592232 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" podUID="dd6a2056-7948-4823-bb36-f9e650d649db" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.593223 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-29k4r"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.667458 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.691796 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:48 crc kubenswrapper[4760]: E1124 17:05:48.692448 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:49.192433173 +0000 UTC m=+144.515314723 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.700670 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-bqtxg"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.706171 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.797159 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:48 crc kubenswrapper[4760]: E1124 17:05:48.797625 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:49.297605035 +0000 UTC m=+144.620486585 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.883420 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nnqqr" podStartSLOduration=123.883403648 podStartE2EDuration="2m3.883403648s" podCreationTimestamp="2025-11-24 17:03:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:48.850149335 +0000 UTC m=+144.173030905" watchObservedRunningTime="2025-11-24 17:05:48.883403648 +0000 UTC m=+144.206285198"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.883891 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" podStartSLOduration=121.883883291 podStartE2EDuration="2m1.883883291s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:48.8810198 +0000 UTC m=+144.203901360" watchObservedRunningTime="2025-11-24 17:05:48.883883291 +0000 UTC m=+144.206764841"
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.900262 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:48 crc kubenswrapper[4760]: E1124 17:05:48.900563 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:49.400547414 +0000 UTC m=+144.723428964 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.901781 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-2stcx"]
Nov 24 17:05:48 crc kubenswrapper[4760]: I1124 17:05:48.942403 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq"]
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.001206 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:49 crc kubenswrapper[4760]: E1124 17:05:49.001504 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:49.501489096 +0000 UTC m=+144.824370646 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.083427 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv"]
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.103142 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:49 crc kubenswrapper[4760]: E1124 17:05:49.103519 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:49.603504828 +0000 UTC m=+144.926386368 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.140680 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"]
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.205878 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:49 crc kubenswrapper[4760]: E1124 17:05:49.206191 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:49.706180219 +0000 UTC m=+145.029061769 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.315281 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:49 crc kubenswrapper[4760]: E1124 17:05:49.315620 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:49.815605532 +0000 UTC m=+145.138487082 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.339479 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r"]
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.351488 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t"]
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.355017 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wwglm"]
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.417104 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.424648 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-bkvpl"]
Nov 24 17:05:49 crc kubenswrapper[4760]: E1124 17:05:49.436843 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:49.936824889 +0000 UTC m=+145.259706439 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.465595 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-cxr8b"]
Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.526843 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:49 crc kubenswrapper[4760]: E1124 17:05:49.527150 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:50.027133599 +0000 UTC m=+145.350015149 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.568777 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-smj94"] Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.568997 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb"] Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.569032 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"] Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.569042 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-bqz55"] Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.569053 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rp7d8"] Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.635070 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:49 crc kubenswrapper[4760]: E1124 17:05:49.635362 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:50.135351677 +0000 UTC m=+145.458233227 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.697451 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" event={"ID":"53abeab9-107f-44fd-84b7-c641f8583fbd","Type":"ContainerStarted","Data":"db40d1390f9b27a6a56288502ee6b5f825006088c26151c0fa9248f4f389040f"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.714140 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh" event={"ID":"325ba002-fdd6-411d-bd9b-104bc011abd6","Type":"ContainerStarted","Data":"65f4950ce5f4b2edd4c91ee7953e86941d6d7514fb6d72383dc0d5b1099e072a"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.717185 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" event={"ID":"ce3ac3d9-4e06-4e26-acf1-b5b93f32dfe3","Type":"ContainerStarted","Data":"aed5df4790cb7ebc41bc945b85c4cc68d6ad922108da02b3c23d8819f3256854"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.721662 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd" event={"ID":"d483076b-151b-465f-beec-94e8b65379ef","Type":"ContainerStarted","Data":"31d3b3b64010cf46408c21b376a6fb09f14fb939edcaf383709ef644b6581699"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.721699 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd" event={"ID":"d483076b-151b-465f-beec-94e8b65379ef","Type":"ContainerStarted","Data":"d04eb684c417c87e688b8b2f98c14e543c948951991ec8b1ca95493fe6635184"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.723573 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t" event={"ID":"30e61e44-e06d-4ca6-a943-a0a595acb393","Type":"ContainerStarted","Data":"c644fc0d073fe66147f3be8e574175b15a14db942971f149276232b1b5a53e9f"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.724458 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qr42v" event={"ID":"4e26988e-e709-4bf3-81a3-8a4666e7e0da","Type":"ContainerStarted","Data":"e318057935b8a0e81b5a0efa6bb9921722e1215b4d5abc90b1c67081d704b758"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.731828 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" event={"ID":"dd612234-f5b5-494b-b405-878b48935d15","Type":"ContainerStarted","Data":"6556699c972d46ee74bb018f312b484d126ebf673053be73ec48d417c8c73016"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.737895 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:49 crc kubenswrapper[4760]: E1124 17:05:49.738350 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:50.238336507 +0000 UTC m=+145.561218057 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.744888 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg" event={"ID":"7d65d0f7-afc5-4170-9ea8-edabc0e0cf33","Type":"ContainerStarted","Data":"7cf7065801589be0389b09484c9761689959f629c730813683d4897e3669efa0"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.749516 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" event={"ID":"82a0fb7c-7c66-41a6-9ebb-5608d47ce382","Type":"ContainerStarted","Data":"03887378f56467101cada4a624b999db410ce3a0a4eec25184f0b8000e760e7f"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.789070 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" event={"ID":"474f6772-b7de-416d-bf20-9cd6326bfb37","Type":"ContainerStarted","Data":"394ca301672ec031f072bd0c9f119450e6119e7af202f5c6f988b5b40f04dc3e"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.790375 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-29k4r" event={"ID":"48331584-ccc8-4953-ab9d-738087c5f55b","Type":"ContainerStarted","Data":"303179b5cca8a5be0e5b8fb3e28c0fe68dba44916fca855b1d23b976730b0520"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.801039 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd"] Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.801562 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq" event={"ID":"cebb48e4-432a-42f0-9e13-7a11ab680535","Type":"ContainerStarted","Data":"003b22ebea5816baee146c2ee5b18f2ce610c2a036a56bfd0434343061d9e89d"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.801582 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq" event={"ID":"cebb48e4-432a-42f0-9e13-7a11ab680535","Type":"ContainerStarted","Data":"0a906b050d39cad6cab58478e2eb26e961371ba059e33614b86c477585ac429f"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.802988 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"] Nov 24 17:05:49 crc 
kubenswrapper[4760]: I1124 17:05:49.839064 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:49 crc kubenswrapper[4760]: E1124 17:05:49.839979 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:50.339963579 +0000 UTC m=+145.662845129 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.853189 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" event={"ID":"843e455c-4df4-4e25-91f1-456b61889db5","Type":"ContainerStarted","Data":"20ff85bda61c6e5f5612147ea431613b1c53dd19571453a84d77d4e047b61fb6"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.853232 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" event={"ID":"843e455c-4df4-4e25-91f1-456b61889db5","Type":"ContainerStarted","Data":"c0ff39e99da7d0d1f20854775560ef1ca990f38ce08ba55dfd988247740f29c9"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.862503 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv" event={"ID":"cacf6995-d78c-4d35-a289-4ed9f982becd","Type":"ContainerStarted","Data":"c8d58c5722a48e965565ea1e032bbdfcb47eef089c807ae1ebee2940cf9b0d0e"} Nov 24 17:05:49 crc kubenswrapper[4760]: W1124 17:05:49.919748 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73b081c8_15c0_40af_a11b_0a381bde2e72.slice/crio-9404ed2cdb1b9cab62c4da31191ee8054bf44d1f2890bf3c2f5559cbbe11e00e WatchSource:0}: Error finding container 9404ed2cdb1b9cab62c4da31191ee8054bf44d1f2890bf3c2f5559cbbe11e00e: Status 404 returned error can't find the container with id 9404ed2cdb1b9cab62c4da31191ee8054bf44d1f2890bf3c2f5559cbbe11e00e Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.926316 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw" event={"ID":"a67e9663-0794-412b-b976-c0c50f39184e","Type":"ContainerStarted","Data":"557ceaecb0d897c30ac09a538c5debabd7c5212bd3a5af816fe89789306b0880"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.931685 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" event={"ID":"ffe37073-7f98-4d38-a3a3-7b3e1f3df449","Type":"ContainerStarted","Data":"7e36fd5d6a7086124114f4f51c676e337457e0c90a706d3a9463a9823cd869a8"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.938630 4760 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2stcx" event={"ID":"1821fc95-952c-44e2-9d50-5458327620e9","Type":"ContainerStarted","Data":"bfa1572b0ce4a8a7605ccba96e366e16e2d4f9e38c90bd492d3aa8b3c6f2e22c"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.940458 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:49 crc kubenswrapper[4760]: E1124 17:05:49.941688 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:50.441673762 +0000 UTC m=+145.764555312 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.950321 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm" event={"ID":"09c5bc40-1e38-424d-935a-456542a5e818","Type":"ContainerStarted","Data":"bde06676fc34099210c36cbd2150e0c6099b272f4f78532e380d45eb6ddd7ed0"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.957908 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7" event={"ID":"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3","Type":"ContainerStarted","Data":"e56e78674f986739950fb56f947daef584d3c0bef8e2be5a533604afa5e5d918"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.959447 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-2gzg7" event={"ID":"da7e7bd6-cc1b-46a5-b80b-1671226fda49","Type":"ContainerStarted","Data":"c6b16e9518c36b2537584fa3a3d0166d4e5c4ddfb32036bb67aaf1f0107edbbb"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.960316 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r" event={"ID":"880a13c1-2abe-4aa8-9124-2ab04374e740","Type":"ContainerStarted","Data":"3ce7d936222bdb60a873875bf2e82f5193b93b0776f15a0c393771ff42280065"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.969942 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq" event={"ID":"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5","Type":"ContainerStarted","Data":"044a99e183b478a4480b0d4fc3867fab25c686882ba187f242b8f436ac5dd5e8"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.986712 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv" 
event={"ID":"b96edc58-6fe3-4af4-a252-3e967b42eb40","Type":"ContainerStarted","Data":"5856b91867a0eff97734bb8d26b97bd278552cdcb10eb381ebd1b02c67042aef"} Nov 24 17:05:49 crc kubenswrapper[4760]: I1124 17:05:49.990454 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk" event={"ID":"212051f7-d251-4005-a25d-ac53d864a70c","Type":"ContainerStarted","Data":"d7e3f17565498d11a4f6f86c560b26726b40b2c6a2139098a5c20055ea52f6f7"} Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.001500 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.002349 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.014375 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.050644 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:50 crc kubenswrapper[4760]: E1124 17:05:50.052953 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:50.552935597 +0000 UTC m=+145.875817137 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.095855 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.151047 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zb8vd" podStartSLOduration=124.151030388 podStartE2EDuration="2m4.151030388s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:50.124498506 +0000 UTC m=+145.447380056" watchObservedRunningTime="2025-11-24 17:05:50.151030388 +0000 UTC m=+145.473911938" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.151748 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.151833 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p46tt" podStartSLOduration=123.151827461 podStartE2EDuration="2m3.151827461s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:50.150110792 +0000 UTC m=+145.472992342" watchObservedRunningTime="2025-11-24 17:05:50.151827461 +0000 UTC m=+145.474709011" Nov 24 17:05:50 crc kubenswrapper[4760]: E1124 17:05:50.152988 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:50.652971553 +0000 UTC m=+145.975853103 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.190391 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" podStartSLOduration=124.190368503 podStartE2EDuration="2m4.190368503s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:50.188335206 +0000 UTC m=+145.511216756" watchObservedRunningTime="2025-11-24 17:05:50.190368503 +0000 UTC m=+145.513250053" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.243385 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" podStartSLOduration=124.243368366 podStartE2EDuration="2m4.243368366s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:50.223916044 +0000 UTC m=+145.546797594" watchObservedRunningTime="2025-11-24 17:05:50.243368366 +0000 UTC m=+145.566249916" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.257629 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-phspq" podStartSLOduration=123.25761199 podStartE2EDuration="2m3.25761199s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:50.255798158 +0000 UTC m=+145.578679708" watchObservedRunningTime="2025-11-24 17:05:50.25761199 +0000 UTC m=+145.580493620" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.260855 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:50 crc kubenswrapper[4760]: E1124 17:05:50.261289 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:50.761276514 +0000 UTC m=+146.084158064 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.362189 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:50 crc kubenswrapper[4760]: E1124 17:05:50.362572 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:50.862557675 +0000 UTC m=+146.185439225 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.371669 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw" podStartSLOduration=123.371646593 podStartE2EDuration="2m3.371646593s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:50.300507426 +0000 UTC m=+145.623388976" watchObservedRunningTime="2025-11-24 17:05:50.371646593 +0000 UTC m=+145.694528133" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.429502 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-2m97z" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.439281 4760 patch_prober.go:28] interesting pod/router-default-5444994796-2m97z container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:05:50 crc kubenswrapper[4760]: [-]has-synced failed: reason withheld Nov 24 17:05:50 crc kubenswrapper[4760]: [+]process-running ok Nov 24 17:05:50 crc kubenswrapper[4760]: healthz check failed Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.439313 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2m97z" podUID="f4e5eb55-04d5-4d78-8c6e-73eb5233c269" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.464143 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:50 crc kubenswrapper[4760]: E1124 17:05:50.464492 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:50.964478215 +0000 UTC m=+146.287359765 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.482273 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-rsr75" podStartSLOduration=124.482256639 podStartE2EDuration="2m4.482256639s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:50.459926726 +0000 UTC m=+145.782808276" watchObservedRunningTime="2025-11-24 17:05:50.482256639 +0000 UTC m=+145.805138189" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.528468 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r7tdx" podStartSLOduration=125.528449839 podStartE2EDuration="2m5.528449839s" podCreationTimestamp="2025-11-24 17:03:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:50.481873878 +0000 UTC m=+145.804755428" watchObservedRunningTime="2025-11-24 17:05:50.528449839 +0000 UTC m=+145.851331389" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.565443 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:50 crc kubenswrapper[4760]: E1124 17:05:50.566125 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.066107746 +0000 UTC m=+146.388989296 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.668713 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:50 crc kubenswrapper[4760]: E1124 17:05:50.669130 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.169114387 +0000 UTC m=+146.491995937 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.770559 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:50 crc kubenswrapper[4760]: E1124 17:05:50.770974 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.270956504 +0000 UTC m=+146.593838054 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.872321 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:50 crc kubenswrapper[4760]: E1124 17:05:50.874243 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.374217402 +0000 UTC m=+146.697098952 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.878215 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.899286 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" podStartSLOduration=124.899272492 podStartE2EDuration="2m4.899272492s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:50.897592085 +0000 UTC m=+146.220473635" watchObservedRunningTime="2025-11-24 17:05:50.899272492 +0000 UTC m=+146.222154042" Nov 24 17:05:50 crc kubenswrapper[4760]: I1124 17:05:50.974443 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:50 crc kubenswrapper[4760]: E1124 17:05:50.975153 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.475137153 +0000 UTC m=+146.798018703 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.031283 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-2gzg7" event={"ID":"da7e7bd6-cc1b-46a5-b80b-1671226fda49","Type":"ContainerStarted","Data":"eeb96ec75b9078c3336ce807cc35a6de0f066eeaaca8278daf86484bfd73da6f"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.032992 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7" event={"ID":"7eb5f696-cfab-4dd3-813b-8b4c389aa6c3","Type":"ContainerStarted","Data":"b69883422ba26cde537781accf0a0b20a18e68c97b6f792b9cd56416d4aa7701"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.040774 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv" event={"ID":"cacf6995-d78c-4d35-a289-4ed9f982becd","Type":"ContainerStarted","Data":"e89640f19401984f7c914885210c8c560051538666278ff794cdf184d192c7ac"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.059576 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-c8j2v" podStartSLOduration=125.059558887 podStartE2EDuration="2m5.059558887s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.058932949 +0000 UTC m=+146.381814489" watchObservedRunningTime="2025-11-24 17:05:51.059558887 +0000 UTC m=+146.382440437" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.078445 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.078848 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.578836253 +0000 UTC m=+146.901717793 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.090563 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-2m97z" podStartSLOduration=125.090544085 podStartE2EDuration="2m5.090544085s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.090320719 +0000 UTC m=+146.413202269" watchObservedRunningTime="2025-11-24 17:05:51.090544085 +0000 UTC m=+146.413425635" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.095547 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sldtw" event={"ID":"a67e9663-0794-412b-b976-c0c50f39184e","Type":"ContainerStarted","Data":"2b38944b5849d0dd6f4576ab75d395b78c16ad6851243492ae08f0b19703b8ff"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.127121 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qr42v" event={"ID":"4e26988e-e709-4bf3-81a3-8a4666e7e0da","Type":"ContainerStarted","Data":"e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.143523 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh" event={"ID":"325ba002-fdd6-411d-bd9b-104bc011abd6","Type":"ContainerStarted","Data":"1638241ed22964d5069255fd7a8cde19bb539a09f2cda75a9e177f6561e73ad5"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.151456 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-bqz55" event={"ID":"f49183f5-e6c6-4938-b02f-de17d2d16ecc","Type":"ContainerStarted","Data":"d163d125ddaf9c0224c992d0ed6a48833f4c6a7a832089bf186fdaa59ada3fbb"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.174773 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rp7d8" event={"ID":"0f2b5450-9c1d-4491-85f7-006cd0647f92","Type":"ContainerStarted","Data":"ed3969c47c25b77f6333f41af65206fa158cedf406a56297dff052286271646d"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.181320 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.181439 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.681422112 +0000 UTC m=+147.004303652 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.181672 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.183128 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.68312012 +0000 UTC m=+147.006001670 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.191776 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b" event={"ID":"b07b23cd-015a-4115-b960-a9d9687dc74b","Type":"ContainerStarted","Data":"f67b5f0c2ab9b6d746ab6ddef93922d622f4ba1d927765906ff0280b4c283305"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.191814 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b" event={"ID":"b07b23cd-015a-4115-b960-a9d9687dc74b","Type":"ContainerStarted","Data":"8aceed7f53e77f585f5e5d87c13f75ff7b84b79f801724ed621df0aa963346a0"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.207690 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk" event={"ID":"212051f7-d251-4005-a25d-ac53d864a70c","Type":"ContainerStarted","Data":"af27d7a983c653d196b58c3bbf769d358bf07d15048db039d8ae97222453926b"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.235306 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-29k4r" event={"ID":"48331584-ccc8-4953-ab9d-738087c5f55b","Type":"ContainerStarted","Data":"806e7445f20d990d655d1091cab0363eea7b2d8c48bf57b5d532272ab70fa5c9"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.236221 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.247213 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-cxr8b" podStartSLOduration=124.247197047 podStartE2EDuration="2m4.247197047s" 
podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.246121746 +0000 UTC m=+146.569003296" watchObservedRunningTime="2025-11-24 17:05:51.247197047 +0000 UTC m=+146.570078597" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.249824 4760 patch_prober.go:28] interesting pod/console-operator-58897d9998-29k4r container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.249873 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-29k4r" podUID="48331584-ccc8-4953-ab9d-738087c5f55b" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.283246 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.284511 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.784496444 +0000 UTC m=+147.107377984 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.310294 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r" event={"ID":"880a13c1-2abe-4aa8-9124-2ab04374e740","Type":"ContainerStarted","Data":"5256164ac4b589be75e436b3a80caff9e12d60222bb4e6abeca97af04797814b"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.337481 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg" event={"ID":"7d65d0f7-afc5-4170-9ea8-edabc0e0cf33","Type":"ContainerStarted","Data":"939608615a90638dbf3a7ce013c71c2cd4e407b8ec96d4398ed398115e679195"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.339336 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-rp7d8" podStartSLOduration=6.339314639 podStartE2EDuration="6.339314639s" podCreationTimestamp="2025-11-24 17:05:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.338097854 +0000 UTC m=+146.660979404" watchObservedRunningTime="2025-11-24 17:05:51.339314639 +0000 UTC m=+146.662196189" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.340181 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-qr42v" podStartSLOduration=125.340173123 podStartE2EDuration="2m5.340173123s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.282249851 +0000 UTC m=+146.605131401" watchObservedRunningTime="2025-11-24 17:05:51.340173123 +0000 UTC m=+146.663054673" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.366067 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm" event={"ID":"09c5bc40-1e38-424d-935a-456542a5e818","Type":"ContainerStarted","Data":"328e7c09f123ff8f121e26f1f40529834dae31951fe7d0becb3f44ae0d3f7956"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.366566 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.377857 4760 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-sgkrm container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" start-of-body= Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.377900 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm" podUID="09c5bc40-1e38-424d-935a-456542a5e818" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 
10.217.0.26:5443: connect: connection refused" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.378737 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4rksk" podStartSLOduration=124.378722296 podStartE2EDuration="2m4.378722296s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.37744674 +0000 UTC m=+146.700328290" watchObservedRunningTime="2025-11-24 17:05:51.378722296 +0000 UTC m=+146.701603836" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.383101 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd" event={"ID":"73b081c8-15c0-40af-a11b-0a381bde2e72","Type":"ContainerStarted","Data":"9404ed2cdb1b9cab62c4da31191ee8054bf44d1f2890bf3c2f5559cbbe11e00e"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.389896 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.391211 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.891193039 +0000 UTC m=+147.214074659 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.399145 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.401331 4760 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wwglm container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body= Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.401369 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" podUID="82a0fb7c-7c66-41a6-9ebb-5608d47ce382" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.441360 4760 patch_prober.go:28] interesting pod/router-default-5444994796-2m97z container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:05:51 crc kubenswrapper[4760]: [-]has-synced failed: reason withheld Nov 24 17:05:51 crc kubenswrapper[4760]: [+]process-running ok Nov 24 17:05:51 crc kubenswrapper[4760]: healthz check failed Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.441431 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2m97z" podUID="f4e5eb55-04d5-4d78-8c6e-73eb5233c269" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.441971 4760 generic.go:334] "Generic (PLEG): container finished" podID="dd612234-f5b5-494b-b405-878b48935d15" containerID="0bad32b34961cb50cdb6ea7eb1bd90726531989a8df4c6c6b82ea4c9b91f58f6" exitCode=0 Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.442104 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" event={"ID":"dd612234-f5b5-494b-b405-878b48935d15","Type":"ContainerDied","Data":"0bad32b34961cb50cdb6ea7eb1bd90726531989a8df4c6c6b82ea4c9b91f58f6"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.482774 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kpclh" podStartSLOduration=125.482749525 podStartE2EDuration="2m5.482749525s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.460105853 +0000 UTC m=+146.782987403" watchObservedRunningTime="2025-11-24 17:05:51.482749525 +0000 UTC m=+146.805631075" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.490718 4760 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.491908 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.991881824 +0000 UTC m=+147.314763374 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.495613 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.496407 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:51.996390842 +0000 UTC m=+147.319272402 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.544067 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.544113 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.544128 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.544140 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t" event={"ID":"30e61e44-e06d-4ca6-a943-a0a595acb393","Type":"ContainerStarted","Data":"4484f987abc2ad7f9136801a8f060f269d379fe459b7812345a8fa4303892755"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.549111 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-pqtx7" podStartSLOduration=125.549084066 podStartE2EDuration="2m5.549084066s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.542078898 +0000 UTC m=+146.864960448" watchObservedRunningTime="2025-11-24 17:05:51.549084066 +0000 UTC m=+146.871965606" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.557369 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv" event={"ID":"b96edc58-6fe3-4af4-a252-3e967b42eb40","Type":"ContainerStarted","Data":"331ae1229653190111d54c4e64f0b0162d79c65eadb1ca424832fe49ede25586"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.582272 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-bkvpl" event={"ID":"d874525c-7914-4f51-a51e-45cfc97e0fe0","Type":"ContainerStarted","Data":"0ba2d29aff817c1f01fff86c9205c1a549a0b65bc2ba4ebf8f8a3278a46e4c1d"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.582370 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-bkvpl" event={"ID":"d874525c-7914-4f51-a51e-45cfc97e0fe0","Type":"ContainerStarted","Data":"33675d835193bb477a6b1c91e080f7af71bf745d1b158f7a4afe5a11aff535f1"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.598557 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.599768 4760 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:52.099750553 +0000 UTC m=+147.422632103 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.605422 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv" event={"ID":"893b8ae8-4ab4-474e-b6bc-ed926c279c44","Type":"ContainerStarted","Data":"d62bd2526b03b4f64ca182719af5d9ae3f9508c63c6eb248e6f08019734e9a9e"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.606718 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-2gzg7" podStartSLOduration=6.60670422 podStartE2EDuration="6.60670422s" podCreationTimestamp="2025-11-24 17:05:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.605502916 +0000 UTC m=+146.928384466" watchObservedRunningTime="2025-11-24 17:05:51.60670422 +0000 UTC m=+146.929585770" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.614438 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" event={"ID":"c2682b9f-d828-4b3b-958e-3ee8dfb4a090","Type":"ContainerStarted","Data":"2b4b1f2aa880357c942ea6952b059bb5ad116c0098fe708c1b487f760ede45a7"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.615362 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.623874 4760 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-87zlw container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.623919 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" podUID="c2682b9f-d828-4b3b-958e-3ee8dfb4a090" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.624272 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq" event={"ID":"7a832ad8-66ea-4fd0-89d0-5b28e7f98dd5","Type":"ContainerStarted","Data":"50080b050829886ddfaf3473be97ead49d4ce02dedf6ae9b0e78f8c71c92e7b8"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.636716 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb" 
event={"ID":"e2937254-b481-4958-8891-c0b9c2b85983","Type":"ContainerStarted","Data":"03ad5e553a0e0748fb69a4dc13fc51c353dfa7148a86fa08c51bf9aa1cb641f1"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.636759 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb" event={"ID":"e2937254-b481-4958-8891-c0b9c2b85983","Type":"ContainerStarted","Data":"2aef2d02039364d41edea1168e22b859544cff323017308808943d0f3db8c02c"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.648477 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t" podStartSLOduration=124.648458854 podStartE2EDuration="2m4.648458854s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.632649635 +0000 UTC m=+146.955531185" watchObservedRunningTime="2025-11-24 17:05:51.648458854 +0000 UTC m=+146.971340404" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.661165 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xrzxz"] Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.661794 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv" podStartSLOduration=125.661775571 podStartE2EDuration="2m5.661775571s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.656211553 +0000 UTC m=+146.979093103" watchObservedRunningTime="2025-11-24 17:05:51.661775571 +0000 UTC m=+146.984657121" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.662096 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.666271 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94" event={"ID":"f3586eca-4355-49c8-b3df-f5a50b3b0381","Type":"ContainerStarted","Data":"b42185a86631728030bc03101967d1666cd1d9b85303528a110748895c0d2026"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.666326 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94" event={"ID":"f3586eca-4355-49c8-b3df-f5a50b3b0381","Type":"ContainerStarted","Data":"95814b094e9e1f13ac5b171b9621d94fc630eb005a74d3e82317e11c169dc448"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.667780 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2stcx" event={"ID":"1821fc95-952c-44e2-9d50-5458327620e9","Type":"ContainerStarted","Data":"5e128b860811efc3c16d4dec3ff60f46858e0545f8bd27d59cd53e37e51dba38"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.668826 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-2stcx" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.670505 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" event={"ID":"53abeab9-107f-44fd-84b7-c641f8583fbd","Type":"ContainerStarted","Data":"12cae17c921fc0b50c9cf93365f2c88b1e98864052c8df8d312f19b2eddfc82f"} Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.672488 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.679181 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xrzxz"] Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.693405 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.693461 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.694099 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" podStartSLOduration=124.694083327 podStartE2EDuration="2m4.694083327s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.693071828 +0000 UTC m=+147.015953378" watchObservedRunningTime="2025-11-24 17:05:51.694083327 +0000 UTC m=+147.016964877" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.699906 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.700452 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:52.200437777 +0000 UTC m=+147.523319327 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.754271 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-29k4r" podStartSLOduration=125.754247593 podStartE2EDuration="2m5.754247593s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.73863989 +0000 UTC m=+147.061521430" watchObservedRunningTime="2025-11-24 17:05:51.754247593 +0000 UTC m=+147.077129143" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.799694 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm" podStartSLOduration=124.799677411 podStartE2EDuration="2m4.799677411s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.798968581 +0000 UTC m=+147.121850131" watchObservedRunningTime="2025-11-24 17:05:51.799677411 +0000 UTC m=+147.122558961" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.806622 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.807141 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:52.307124272 +0000 UTC m=+147.630005822 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.807464 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-utilities\") pod \"certified-operators-xrzxz\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.807690 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.808028 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmcq6\" (UniqueName: \"kubernetes.io/projected/443db8f4-7e0f-498f-9602-c93d1086f2cb-kube-api-access-fmcq6\") pod \"certified-operators-xrzxz\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.808454 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-catalog-content\") pod \"certified-operators-xrzxz\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.809565 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:52.309553061 +0000 UTC m=+147.632434611 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.844513 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" podStartSLOduration=124.844494172 podStartE2EDuration="2m4.844494172s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.843455412 +0000 UTC m=+147.166336962" watchObservedRunningTime="2025-11-24 17:05:51.844494172 +0000 UTC m=+147.167375722" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.856577 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l96mk"] Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.858191 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.860163 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.871615 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l96mk"] Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.876414 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-2stcx" podStartSLOduration=125.876397896 podStartE2EDuration="2m5.876397896s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.874855963 +0000 UTC m=+147.197737513" watchObservedRunningTime="2025-11-24 17:05:51.876397896 +0000 UTC m=+147.199279446" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.909653 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.909814 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-catalog-content\") pod \"certified-operators-xrzxz\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.909852 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-utilities\") pod \"certified-operators-xrzxz\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:51 crc 
kubenswrapper[4760]: I1124 17:05:51.909907 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmcq6\" (UniqueName: \"kubernetes.io/projected/443db8f4-7e0f-498f-9602-c93d1086f2cb-kube-api-access-fmcq6\") pod \"certified-operators-xrzxz\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:51 crc kubenswrapper[4760]: E1124 17:05:51.910238 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:52.410224725 +0000 UTC m=+147.733106275 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.910568 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-catalog-content\") pod \"certified-operators-xrzxz\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.910791 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-utilities\") pod \"certified-operators-xrzxz\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.914830 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-smj94" podStartSLOduration=124.914815465 podStartE2EDuration="2m4.914815465s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.899085209 +0000 UTC m=+147.221966759" watchObservedRunningTime="2025-11-24 17:05:51.914815465 +0000 UTC m=+147.237697015" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.915783 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cs6nq" podStartSLOduration=125.915776003 podStartE2EDuration="2m5.915776003s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.913488598 +0000 UTC m=+147.236370148" watchObservedRunningTime="2025-11-24 17:05:51.915776003 +0000 UTC m=+147.238657553" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.940470 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmcq6\" (UniqueName: \"kubernetes.io/projected/443db8f4-7e0f-498f-9602-c93d1086f2cb-kube-api-access-fmcq6\") pod \"certified-operators-xrzxz\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") 
" pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.944111 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" podStartSLOduration=124.944076635 podStartE2EDuration="2m4.944076635s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.941401209 +0000 UTC m=+147.264282759" watchObservedRunningTime="2025-11-24 17:05:51.944076635 +0000 UTC m=+147.266958185" Nov 24 17:05:51 crc kubenswrapper[4760]: I1124 17:05:51.998730 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.012576 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb" podStartSLOduration=125.012556627 podStartE2EDuration="2m5.012556627s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:51.966366877 +0000 UTC m=+147.289248427" watchObservedRunningTime="2025-11-24 17:05:52.012556627 +0000 UTC m=+147.335438177" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.023408 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kd72g\" (UniqueName: \"kubernetes.io/projected/1d5f4395-aa76-4909-9736-9f67f65b9125-kube-api-access-kd72g\") pod \"community-operators-l96mk\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.023457 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-utilities\") pod \"community-operators-l96mk\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.023615 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-catalog-content\") pod \"community-operators-l96mk\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.023648 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.024207 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:52.524192116 +0000 UTC m=+147.847073666 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.047386 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-9w6q4" podStartSLOduration=126.047370374 podStartE2EDuration="2m6.047370374s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:52.020500072 +0000 UTC m=+147.343381622" watchObservedRunningTime="2025-11-24 17:05:52.047370374 +0000 UTC m=+147.370251924" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.055725 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k84dx"] Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.056909 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.065805 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k84dx"] Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.124748 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.125081 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-catalog-content\") pod \"community-operators-l96mk\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.125195 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kd72g\" (UniqueName: \"kubernetes.io/projected/1d5f4395-aa76-4909-9736-9f67f65b9125-kube-api-access-kd72g\") pod \"community-operators-l96mk\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.125222 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-utilities\") pod \"community-operators-l96mk\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.125770 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-utilities\") pod \"community-operators-l96mk\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " 
pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.125865 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:52.625849549 +0000 UTC m=+147.948731099 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.126362 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-catalog-content\") pod \"community-operators-l96mk\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.164513 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kd72g\" (UniqueName: \"kubernetes.io/projected/1d5f4395-aa76-4909-9736-9f67f65b9125-kube-api-access-kd72g\") pod \"community-operators-l96mk\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.188316 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.227508 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-utilities\") pod \"certified-operators-k84dx\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") " pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.227556 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-catalog-content\") pod \"certified-operators-k84dx\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") " pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.227614 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.227658 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8pmp\" (UniqueName: \"kubernetes.io/projected/1423e3b0-3691-49ce-b29d-0f838db4ce3e-kube-api-access-x8pmp\") pod \"certified-operators-k84dx\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") " pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.227940 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:52.727926882 +0000 UTC m=+148.050808432 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.253153 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bsld7"] Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.254071 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.300555 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bsld7"] Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.329555 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.329757 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-utilities\") pod \"certified-operators-k84dx\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") " pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.329793 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-catalog-content\") pod \"certified-operators-k84dx\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") " pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.329873 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8pmp\" (UniqueName: \"kubernetes.io/projected/1423e3b0-3691-49ce-b29d-0f838db4ce3e-kube-api-access-x8pmp\") pod \"certified-operators-k84dx\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") " pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.330208 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:52.830193911 +0000 UTC m=+148.153075461 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.330536 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-utilities\") pod \"certified-operators-k84dx\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") " pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.330737 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-catalog-content\") pod \"certified-operators-k84dx\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") " pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.360914 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8pmp\" (UniqueName: \"kubernetes.io/projected/1423e3b0-3691-49ce-b29d-0f838db4ce3e-kube-api-access-x8pmp\") pod \"certified-operators-k84dx\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") " pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.389514 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.429712 4760 patch_prober.go:28] interesting pod/router-default-5444994796-2m97z container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:05:52 crc kubenswrapper[4760]: [-]has-synced failed: reason withheld Nov 24 17:05:52 crc kubenswrapper[4760]: [+]process-running ok Nov 24 17:05:52 crc kubenswrapper[4760]: healthz check failed Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.429978 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2m97z" podUID="f4e5eb55-04d5-4d78-8c6e-73eb5233c269" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.431718 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.431802 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-utilities\") pod \"community-operators-bsld7\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") " pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 
17:05:52.431827 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-catalog-content\") pod \"community-operators-bsld7\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") " pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.431887 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9s8z\" (UniqueName: \"kubernetes.io/projected/cad1e7d4-527f-4dc5-831c-3eaa397c510c-kube-api-access-b9s8z\") pod \"community-operators-bsld7\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") " pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.432277 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:52.932244235 +0000 UTC m=+148.255125785 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.512205 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xrzxz"] Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.533941 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.534267 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-utilities\") pod \"community-operators-bsld7\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") " pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.534345 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-catalog-content\") pod \"community-operators-bsld7\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") " pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.534410 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9s8z\" (UniqueName: \"kubernetes.io/projected/cad1e7d4-527f-4dc5-831c-3eaa397c510c-kube-api-access-b9s8z\") pod \"community-operators-bsld7\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") " pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.534782 4760 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.034764841 +0000 UTC m=+148.357646391 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.535188 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-utilities\") pod \"community-operators-bsld7\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") " pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.535494 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-catalog-content\") pod \"community-operators-bsld7\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") " pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.563440 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l96mk"] Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.564918 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9s8z\" (UniqueName: \"kubernetes.io/projected/cad1e7d4-527f-4dc5-831c-3eaa397c510c-kube-api-access-b9s8z\") pod \"community-operators-bsld7\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") " pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.571026 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nc4ft" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.586328 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.636656 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.636957 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.136942838 +0000 UTC m=+148.459824378 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.702098 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg" event={"ID":"7d65d0f7-afc5-4170-9ea8-edabc0e0cf33","Type":"ContainerStarted","Data":"74db5ea9661037c6fcd3883f303d6a4d30e58abe8d7643405677022c94e2dba3"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.723631 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-bqtxg" podStartSLOduration=125.723611646 podStartE2EDuration="2m5.723611646s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:52.723559944 +0000 UTC m=+148.046441494" watchObservedRunningTime="2025-11-24 17:05:52.723611646 +0000 UTC m=+148.046493196" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.725153 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv" event={"ID":"893b8ae8-4ab4-474e-b6bc-ed926c279c44","Type":"ContainerStarted","Data":"d8ca9cc287671f1004fe493e824a3a6bf0612ee8b572b7528a7a9621dee80e2d"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.737540 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.738790 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.238772935 +0000 UTC m=+148.561654485 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.751278 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t" event={"ID":"30e61e44-e06d-4ca6-a943-a0a595acb393","Type":"ContainerStarted","Data":"018dc54dd4d8003a260d184c12bebeeb90ccaa5420868b294013abad0719a3bb"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.754554 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" event={"ID":"c2682b9f-d828-4b3b-958e-3ee8dfb4a090","Type":"ContainerStarted","Data":"83d7003806b123fd713a58c1f3b30ad6b2f9df1fcfcac250cb76b62909577258"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.758341 4760 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-87zlw container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.758383 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" podUID="c2682b9f-d828-4b3b-958e-3ee8dfb4a090" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.803136 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv" event={"ID":"b96edc58-6fe3-4af4-a252-3e967b42eb40","Type":"ContainerStarted","Data":"a5bd762b3e29ada7ec7d5babc19894dd65fed1c43dbf8256cc26af998ea747fd"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.807363 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" event={"ID":"dd612234-f5b5-494b-b405-878b48935d15","Type":"ContainerStarted","Data":"ea608d1674b9c7e4da4f020f0e8fc79ea149edaca5620a960e2eee0346887161"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.809309 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rt9dn" event={"ID":"843e455c-4df4-4e25-91f1-456b61889db5","Type":"ContainerStarted","Data":"c18d55ab9cf3b53e491112777952d6e8d3dd70388bbedc767935d1321c8a34f5"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.810881 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r" event={"ID":"880a13c1-2abe-4aa8-9124-2ab04374e740","Type":"ContainerStarted","Data":"8b642f105561bfe283333c61f3129beac9638d00ab8a2f0c9e1a93928d087c8b"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.821713 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd" 
event={"ID":"73b081c8-15c0-40af-a11b-0a381bde2e72","Type":"ContainerStarted","Data":"d695ed137cddeaf9c49c40f01be79ed6bbd36d0c37bf4632018b457c931cd5ee"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.821762 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd" event={"ID":"73b081c8-15c0-40af-a11b-0a381bde2e72","Type":"ContainerStarted","Data":"0429c47fd92f290d9c3a30f0cae4f50718196f39a30b824d8054aac1fcb52f9a"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.823194 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rp7d8" event={"ID":"0f2b5450-9c1d-4491-85f7-006cd0647f92","Type":"ContainerStarted","Data":"53a4364c754d107262b151a5e855da26298f8c584a086a3b7d7dd80d514ba0c8"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.826253 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-rcsnv" podStartSLOduration=126.826227725 podStartE2EDuration="2m6.826227725s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:52.817659692 +0000 UTC m=+148.140541252" watchObservedRunningTime="2025-11-24 17:05:52.826227725 +0000 UTC m=+148.149109295" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.834088 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v8wjb" event={"ID":"e2937254-b481-4958-8891-c0b9c2b85983","Type":"ContainerStarted","Data":"0391ee43392b5d5e94ffdabd341ed3dffeae77252280f9f408f615003a681abd"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.837991 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" podStartSLOduration=125.837977348 podStartE2EDuration="2m5.837977348s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:52.835198799 +0000 UTC m=+148.158080349" watchObservedRunningTime="2025-11-24 17:05:52.837977348 +0000 UTC m=+148.160858898" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.838381 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xrzxz" event={"ID":"443db8f4-7e0f-498f-9602-c93d1086f2cb","Type":"ContainerStarted","Data":"c0adebb787cb5f3fb4a0317257a7aa47f59c536af5f1dac0ef15258880cfa304"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.849537 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.850769 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.35074652 +0000 UTC m=+148.673628070 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.860617 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-n2m2r" podStartSLOduration=125.860595889 podStartE2EDuration="2m5.860595889s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:52.85886669 +0000 UTC m=+148.181748250" watchObservedRunningTime="2025-11-24 17:05:52.860595889 +0000 UTC m=+148.183477439" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.873711 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" event={"ID":"82a0fb7c-7c66-41a6-9ebb-5608d47ce382","Type":"ContainerStarted","Data":"b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.874722 4760 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wwglm container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body= Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.874750 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" podUID="82a0fb7c-7c66-41a6-9ebb-5608d47ce382" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.886142 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-c8zjd" podStartSLOduration=125.886121963 podStartE2EDuration="2m5.886121963s" podCreationTimestamp="2025-11-24 17:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:52.880572866 +0000 UTC m=+148.203454416" watchObservedRunningTime="2025-11-24 17:05:52.886121963 +0000 UTC m=+148.209003513" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.890247 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k84dx"] Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.890638 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-bkvpl" event={"ID":"d874525c-7914-4f51-a51e-45cfc97e0fe0","Type":"ContainerStarted","Data":"91dd9fff6aece41cf2d8b82074e879e9daa683f403262f90a5f7f45714d0cf92"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.891801 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-bkvpl" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.913387 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv" event={"ID":"cacf6995-d78c-4d35-a289-4ed9f982becd","Type":"ContainerStarted","Data":"99ff1e12da5423362745d3f75f338195f4f74423e57fb3c4a76c5f739e2ddbe6"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.918258 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-bkvpl" podStartSLOduration=7.918227054 podStartE2EDuration="7.918227054s" podCreationTimestamp="2025-11-24 17:05:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:52.915168077 +0000 UTC m=+148.238049627" watchObservedRunningTime="2025-11-24 17:05:52.918227054 +0000 UTC m=+148.241108604" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.928218 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l96mk" event={"ID":"1d5f4395-aa76-4909-9736-9f67f65b9125","Type":"ContainerStarted","Data":"87d3a601f5b4ac2e5fd5429b72e3b948b326eb9c4cc3766f80510e6602467beb"} Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.929675 4760 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-sgkrm container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" start-of-body= Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.929714 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm" podUID="09c5bc40-1e38-424d-935a-456542a5e818" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.930139 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.930162 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.930447 4760 patch_prober.go:28] interesting pod/console-operator-58897d9998-29k4r container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.930502 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-29k4r" podUID="48331584-ccc8-4953-ab9d-738087c5f55b" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.938390 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-md2fv" podStartSLOduration=126.938375055 
podStartE2EDuration="2m6.938375055s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:52.937949953 +0000 UTC m=+148.260831523" watchObservedRunningTime="2025-11-24 17:05:52.938375055 +0000 UTC m=+148.261256605" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.950357 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.950662 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.450626712 +0000 UTC m=+148.773508262 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.951394 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:52 crc kubenswrapper[4760]: E1124 17:05:52.961305 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.461289254 +0000 UTC m=+148.784170794 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.976916 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bsld7"] Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.976967 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.976980 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.992212 4760 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-xlpph container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.14:8443/livez\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 24 17:05:52 crc kubenswrapper[4760]: I1124 17:05:52.992255 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph" podUID="dd612234-f5b5-494b-b405-878b48935d15" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.14:8443/livez\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.062670 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.062937 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.562911086 +0000 UTC m=+148.885792636 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.063877 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.066864 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.566852157 +0000 UTC m=+148.889733707 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.165869 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.166212 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.666196194 +0000 UTC m=+148.989077744 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.266907 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.267295 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.76728032 +0000 UTC m=+149.090161870 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.371777 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.371946 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.871910797 +0000 UTC m=+149.194792357 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.372039 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.372413 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.87240198 +0000 UTC m=+149.195283610 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.376456 4760 patch_prober.go:28] interesting pod/apiserver-76f77b778f-jrf7z container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 24 17:05:53 crc kubenswrapper[4760]: [+]log ok Nov 24 17:05:53 crc kubenswrapper[4760]: [+]etcd ok Nov 24 17:05:53 crc kubenswrapper[4760]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 24 17:05:53 crc kubenswrapper[4760]: [+]poststarthook/generic-apiserver-start-informers ok Nov 24 17:05:53 crc kubenswrapper[4760]: [+]poststarthook/max-in-flight-filter ok Nov 24 17:05:53 crc kubenswrapper[4760]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 24 17:05:53 crc kubenswrapper[4760]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 24 17:05:53 crc kubenswrapper[4760]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 24 17:05:53 crc kubenswrapper[4760]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 24 17:05:53 crc kubenswrapper[4760]: [+]poststarthook/project.openshift.io-projectcache ok Nov 24 17:05:53 crc kubenswrapper[4760]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 24 17:05:53 crc kubenswrapper[4760]: [-]poststarthook/openshift.io-startinformers failed: reason withheld Nov 24 17:05:53 crc kubenswrapper[4760]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 24 17:05:53 crc kubenswrapper[4760]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 24 17:05:53 crc kubenswrapper[4760]: livez check failed Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.376525 4760 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-apiserver/apiserver-76f77b778f-jrf7z" podUID="474f6772-b7de-416d-bf20-9cd6326bfb37" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.446648 4760 patch_prober.go:28] interesting pod/router-default-5444994796-2m97z container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:05:53 crc kubenswrapper[4760]: [-]has-synced failed: reason withheld Nov 24 17:05:53 crc kubenswrapper[4760]: [+]process-running ok Nov 24 17:05:53 crc kubenswrapper[4760]: healthz check failed Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.447066 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2m97z" podUID="f4e5eb55-04d5-4d78-8c6e-73eb5233c269" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.472730 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.472903 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.972879919 +0000 UTC m=+149.295761469 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.473050 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.473363 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:53.973353493 +0000 UTC m=+149.296235113 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.574587 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.574724 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.074704146 +0000 UTC m=+149.397585696 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.574774 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.575087 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.075079727 +0000 UTC m=+149.397961277 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.650149 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l8l4r"] Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.660403 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.665078 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.731544 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.731656 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.231636596 +0000 UTC m=+149.554518146 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.732256 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.732379 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-catalog-content\") pod \"redhat-marketplace-l8l4r\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.732401 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdfxv\" (UniqueName: \"kubernetes.io/projected/e3baee90-4b85-4f85-a756-67dcc7fb373a-kube-api-access-jdfxv\") pod \"redhat-marketplace-l8l4r\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.732459 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-utilities\") pod \"redhat-marketplace-l8l4r\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.732666 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-24 17:05:54.232649084 +0000 UTC m=+149.555530634 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.738457 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l8l4r"] Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.833590 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.833739 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.33371615 +0000 UTC m=+149.656597710 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.833862 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.834119 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-catalog-content\") pod \"redhat-marketplace-l8l4r\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.834152 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdfxv\" (UniqueName: \"kubernetes.io/projected/e3baee90-4b85-4f85-a756-67dcc7fb373a-kube-api-access-jdfxv\") pod \"redhat-marketplace-l8l4r\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.834198 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-utilities\") pod \"redhat-marketplace-l8l4r\" (UID: 
\"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.834362 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.334322997 +0000 UTC m=+149.657204547 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.834573 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-utilities\") pod \"redhat-marketplace-l8l4r\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.835016 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-catalog-content\") pod \"redhat-marketplace-l8l4r\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.860845 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdfxv\" (UniqueName: \"kubernetes.io/projected/e3baee90-4b85-4f85-a756-67dcc7fb373a-kube-api-access-jdfxv\") pod \"redhat-marketplace-l8l4r\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.934091 4760 generic.go:334] "Generic (PLEG): container finished" podID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerID="9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8" exitCode=0 Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.934208 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsld7" event={"ID":"cad1e7d4-527f-4dc5-831c-3eaa397c510c","Type":"ContainerDied","Data":"9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8"} Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.934400 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsld7" event={"ID":"cad1e7d4-527f-4dc5-831c-3eaa397c510c","Type":"ContainerStarted","Data":"7258de4d638efa027aaf1cb3a7eff3726d12a24d9c71f4cc8399eca054240734"} Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.934839 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.934967 4760 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.43495057 +0000 UTC m=+149.757832120 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.935223 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:53 crc kubenswrapper[4760]: E1124 17:05:53.935514 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.435502056 +0000 UTC m=+149.758383596 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.936062 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.936239 4760 generic.go:334] "Generic (PLEG): container finished" podID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerID="aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347" exitCode=0 Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.936293 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l96mk" event={"ID":"1d5f4395-aa76-4909-9736-9f67f65b9125","Type":"ContainerDied","Data":"aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347"} Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.938245 4760 generic.go:334] "Generic (PLEG): container finished" podID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerID="97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227" exitCode=0 Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.938310 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xrzxz" event={"ID":"443db8f4-7e0f-498f-9602-c93d1086f2cb","Type":"ContainerDied","Data":"97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227"} Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.939970 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-bqz55" 
event={"ID":"f49183f5-e6c6-4938-b02f-de17d2d16ecc","Type":"ContainerStarted","Data":"2c565b1f37006407f64e666770f79d1dbc3a8429978b70e5483b69e7368a2f98"} Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.941720 4760 generic.go:334] "Generic (PLEG): container finished" podID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerID="e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d" exitCode=0 Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.941856 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k84dx" event={"ID":"1423e3b0-3691-49ce-b29d-0f838db4ce3e","Type":"ContainerDied","Data":"e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d"} Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.941932 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k84dx" event={"ID":"1423e3b0-3691-49ce-b29d-0f838db4ce3e","Type":"ContainerStarted","Data":"c72c8b5ad7552e8bee792820095552668107142565d64aa05fa64e423c8227df"} Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.942361 4760 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wwglm container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body= Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.942397 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.942409 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" podUID="82a0fb7c-7c66-41a6-9ebb-5608d47ce382" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.942430 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.943021 4760 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-87zlw container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Nov 24 17:05:53 crc kubenswrapper[4760]: I1124 17:05:53.943052 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw" podUID="c2682b9f-d828-4b3b-958e-3ee8dfb4a090" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.040136 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.040312 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.540278076 +0000 UTC m=+149.863159626 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.040752 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.042181 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.54216382 +0000 UTC m=+149.865045440 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.046531 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-psr87"] Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.047498 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.050262 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.064208 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-psr87"] Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.142707 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.146223 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.646205189 +0000 UTC m=+149.969086739 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.244829 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.245150 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.245176 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-catalog-content\") pod \"redhat-marketplace-psr87\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") " pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.245217 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8vsh\" (UniqueName: \"kubernetes.io/projected/80110199-7935-43a8-9025-f048cc22defb-kube-api-access-g8vsh\") pod \"redhat-marketplace-psr87\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") " pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.245250 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-24 17:05:54.745232897 +0000 UTC m=+150.068114447 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.245331 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-utilities\") pod \"redhat-marketplace-psr87\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") " pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.255826 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.346134 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.346307 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.846291272 +0000 UTC m=+150.169172822 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.346749 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-catalog-content\") pod \"redhat-marketplace-psr87\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") " pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.346805 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8vsh\" (UniqueName: \"kubernetes.io/projected/80110199-7935-43a8-9025-f048cc22defb-kube-api-access-g8vsh\") pod \"redhat-marketplace-psr87\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") " pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.346827 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.346842 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-utilities\") pod \"redhat-marketplace-psr87\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") " pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.346885 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.346905 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.346928 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.347712 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-utilities\") pod \"redhat-marketplace-psr87\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") " pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.348061 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-catalog-content\") pod \"redhat-marketplace-psr87\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") " pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.348465 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.848450164 +0000 UTC m=+150.171331714 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.349169 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.350381 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.350870 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.360767 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l8l4r"] Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.370316 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-29k4r" Nov 24 17:05:54 crc kubenswrapper[4760]: W1124 17:05:54.370319 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3baee90_4b85_4f85_a756_67dcc7fb373a.slice/crio-50fcd972edddb593d767e837a982daa18dc76f6104acb84d63d1438aeb844fb1 WatchSource:0}: Error finding container 50fcd972edddb593d767e837a982daa18dc76f6104acb84d63d1438aeb844fb1: Status 404 returned error can't find the container with id 
50fcd972edddb593d767e837a982daa18dc76f6104acb84d63d1438aeb844fb1 Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.378579 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8vsh\" (UniqueName: \"kubernetes.io/projected/80110199-7935-43a8-9025-f048cc22defb-kube-api-access-g8vsh\") pod \"redhat-marketplace-psr87\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") " pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.405133 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.432996 4760 patch_prober.go:28] interesting pod/router-default-5444994796-2m97z container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:05:54 crc kubenswrapper[4760]: [-]has-synced failed: reason withheld Nov 24 17:05:54 crc kubenswrapper[4760]: [+]process-running ok Nov 24 17:05:54 crc kubenswrapper[4760]: healthz check failed Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.433110 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2m97z" podUID="f4e5eb55-04d5-4d78-8c6e-73eb5233c269" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.449220 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.449579 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:54.94956066 +0000 UTC m=+150.272442210 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.484673 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.486477 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.552386 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.552826 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.052806418 +0000 UTC m=+150.375688068 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.654408 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.654717 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.154695826 +0000 UTC m=+150.477577386 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.654976 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.660216 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.160196872 +0000 UTC m=+150.483078422 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.667410 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-psr87" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.763905 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.764074 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.264044947 +0000 UTC m=+150.586926497 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.764331 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.765183 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.265169279 +0000 UTC m=+150.588050829 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.849608 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dffm4"] Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.859376 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.865898 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.866091 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dffm4"] Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.866231 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppj2p\" (UniqueName: \"kubernetes.io/projected/0d6c1b39-d49f-49ab-bfa7-c28657529520-kube-api-access-ppj2p\") pod \"redhat-operators-dffm4\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.866310 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-catalog-content\") pod \"redhat-operators-dffm4\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.866423 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-utilities\") pod \"redhat-operators-dffm4\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.866556 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.366538553 +0000 UTC m=+150.689420103 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.869555 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.959467 4760 generic.go:334] "Generic (PLEG): container finished" podID="893b8ae8-4ab4-474e-b6bc-ed926c279c44" containerID="d8ca9cc287671f1004fe493e824a3a6bf0612ee8b572b7528a7a9621dee80e2d" exitCode=0 Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.959576 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv" event={"ID":"893b8ae8-4ab4-474e-b6bc-ed926c279c44","Type":"ContainerDied","Data":"d8ca9cc287671f1004fe493e824a3a6bf0612ee8b572b7528a7a9621dee80e2d"} Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.968577 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.968635 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppj2p\" (UniqueName: \"kubernetes.io/projected/0d6c1b39-d49f-49ab-bfa7-c28657529520-kube-api-access-ppj2p\") pod \"redhat-operators-dffm4\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.968661 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-catalog-content\") pod \"redhat-operators-dffm4\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.968690 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-utilities\") pod \"redhat-operators-dffm4\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:54 crc kubenswrapper[4760]: E1124 17:05:54.972094 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.472080105 +0000 UTC m=+150.794961655 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.972966 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-catalog-content\") pod \"redhat-operators-dffm4\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.973521 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-utilities\") pod \"redhat-operators-dffm4\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.975904 4760 generic.go:334] "Generic (PLEG): container finished" podID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerID="6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49" exitCode=0 Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.976084 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8l4r" event={"ID":"e3baee90-4b85-4f85-a756-67dcc7fb373a","Type":"ContainerDied","Data":"6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49"} Nov 24 17:05:54 crc kubenswrapper[4760]: I1124 17:05:54.976175 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8l4r" event={"ID":"e3baee90-4b85-4f85-a756-67dcc7fb373a","Type":"ContainerStarted","Data":"50fcd972edddb593d767e837a982daa18dc76f6104acb84d63d1438aeb844fb1"} Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.012300 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppj2p\" (UniqueName: \"kubernetes.io/projected/0d6c1b39-d49f-49ab-bfa7-c28657529520-kube-api-access-ppj2p\") pod \"redhat-operators-dffm4\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:55 crc kubenswrapper[4760]: W1124 17:05:55.031414 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-4fe993fc03bc79d262c7aa9227a80f6d317cf8b4b5162db077fc99f817d2de7f WatchSource:0}: Error finding container 4fe993fc03bc79d262c7aa9227a80f6d317cf8b4b5162db077fc99f817d2de7f: Status 404 returned error can't find the container with id 4fe993fc03bc79d262c7aa9227a80f6d317cf8b4b5162db077fc99f817d2de7f Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.074519 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.074763 4760 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.574744336 +0000 UTC m=+150.897625886 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.075022 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.076233 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.576217188 +0000 UTC m=+150.899098738 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.114899 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-psr87"] Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.176660 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.177183 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.6771668 +0000 UTC m=+151.000048350 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.191268 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.267293 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6bpzv"] Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.268849 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.277466 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6bpzv"] Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.278186 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.278704 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.778686498 +0000 UTC m=+151.101568058 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.382699 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.382863 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-catalog-content\") pod \"redhat-operators-6bpzv\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") " pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.382912 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlvwp\" (UniqueName: \"kubernetes.io/projected/5013e377-ec2f-4559-afd3-d3aeab54b0ee-kube-api-access-rlvwp\") pod \"redhat-operators-6bpzv\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") " pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.382950 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-utilities\") pod \"redhat-operators-6bpzv\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") " pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 
17:05:55.383061 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:55.883045777 +0000 UTC m=+151.205927327 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.442384 4760 patch_prober.go:28] interesting pod/router-default-5444994796-2m97z container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 17:05:55 crc kubenswrapper[4760]: [-]has-synced failed: reason withheld Nov 24 17:05:55 crc kubenswrapper[4760]: [+]process-running ok Nov 24 17:05:55 crc kubenswrapper[4760]: healthz check failed Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.442466 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2m97z" podUID="f4e5eb55-04d5-4d78-8c6e-73eb5233c269" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.483649 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-utilities\") pod \"redhat-operators-6bpzv\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") " pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.483721 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-catalog-content\") pod \"redhat-operators-6bpzv\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") " pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.483748 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.483777 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlvwp\" (UniqueName: \"kubernetes.io/projected/5013e377-ec2f-4559-afd3-d3aeab54b0ee-kube-api-access-rlvwp\") pod \"redhat-operators-6bpzv\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") " pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.484241 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-24 17:05:55.984229926 +0000 UTC m=+151.307111476 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.484925 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-catalog-content\") pod \"redhat-operators-6bpzv\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") " pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.485346 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-utilities\") pod \"redhat-operators-6bpzv\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") " pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.530684 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlvwp\" (UniqueName: \"kubernetes.io/projected/5013e377-ec2f-4559-afd3-d3aeab54b0ee-kube-api-access-rlvwp\") pod \"redhat-operators-6bpzv\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") " pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.532249 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.533765 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.536629 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.538704 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.607029 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.607339 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.607850 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.608184 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-24 17:05:56.108159959 +0000 UTC m=+151.431041509 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.608218 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a6b8490c-3eac-45d7-9ffe-d55abaae02d9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.608298 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a6b8490c-3eac-45d7-9ffe-d55abaae02d9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.608352 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.608804 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.108792157 +0000 UTC m=+151.431673707 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.629994 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dffm4"] Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.711645 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.711856 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a6b8490c-3eac-45d7-9ffe-d55abaae02d9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.711923 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a6b8490c-3eac-45d7-9ffe-d55abaae02d9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.711957 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.211928581 +0000 UTC m=+151.534810131 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.712035 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.711977 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a6b8490c-3eac-45d7-9ffe-d55abaae02d9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.712544 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.212532279 +0000 UTC m=+151.535413829 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.750496 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a6b8490c-3eac-45d7-9ffe-d55abaae02d9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.818723 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.819049 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.319024497 +0000 UTC m=+151.641906037 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.819678 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.819987 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.319979024 +0000 UTC m=+151.642860574 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.908328 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.921081 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 17:05:55 crc kubenswrapper[4760]: E1124 17:05:55.921490 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.421475042 +0000 UTC m=+151.744356592 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.985800 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"1c37d0d17885b7d61fa5c14db49e845eb97a3d386d7c0a27caf80ac7cb9402fb"}
Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.985840 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"4fe993fc03bc79d262c7aa9227a80f6d317cf8b4b5162db077fc99f817d2de7f"}
Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.989472 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"2e039a1e288c9ed998d61c3c4c38407b9024c3395719d95ce39468ff38cddafd"}
Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.989511 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"cb2a50f2079097ae156d742f338dc6d77cfb7516ae2bef884266e3887dbcdd56"}
Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.989691 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.995921 4760 generic.go:334] "Generic (PLEG): container finished" podID="80110199-7935-43a8-9025-f048cc22defb" containerID="d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e" exitCode=0
Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.995989 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psr87" event={"ID":"80110199-7935-43a8-9025-f048cc22defb","Type":"ContainerDied","Data":"d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e"}
Nov 24 17:05:55 crc kubenswrapper[4760]: I1124 17:05:55.996044 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psr87" event={"ID":"80110199-7935-43a8-9025-f048cc22defb","Type":"ContainerStarted","Data":"7b5e3a3f8ee03469a71e0b401c9b32aec475d820953082625d25a06f5eb4b4da"}
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.000334 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"762ffc719b96998a36d995fd680170394049b14638d7f5ef64ea39a4038a85c9"}
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.000365 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"8768c60c25cc6b592eaac55a5de4ab7454a7702d0613be41f4b3ecc2832b6f49"}
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.016976 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6bpzv"]
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.022376 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.023907 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.523894215 +0000 UTC m=+151.846775755 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.024404 4760 generic.go:334] "Generic (PLEG): container finished" podID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerID="a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b" exitCode=0
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.024464 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dffm4" event={"ID":"0d6c1b39-d49f-49ab-bfa7-c28657529520","Type":"ContainerDied","Data":"a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b"}
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.024496 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dffm4" event={"ID":"0d6c1b39-d49f-49ab-bfa7-c28657529520","Type":"ContainerStarted","Data":"7dd85ecf0101d68646d7c97efbf164d9d527fd3d460ffae290dc3fc383f2c812"}
Nov 24 17:05:56 crc kubenswrapper[4760]: W1124 17:05:56.095774 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5013e377_ec2f_4559_afd3_d3aeab54b0ee.slice/crio-20d7d9934235cb6b109b5353b70e028e52cd4d5667e3e779695491e12631f61b WatchSource:0}: Error finding container 20d7d9934235cb6b109b5353b70e028e52cd4d5667e3e779695491e12631f61b: Status 404 returned error can't find the container with id 20d7d9934235cb6b109b5353b70e028e52cd4d5667e3e779695491e12631f61b
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.124284 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.124455 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.624432436 +0000 UTC m=+151.947313986 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.124535 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.125414 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.625406953 +0000 UTC m=+151.948288503 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.226065 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.226858 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.726841799 +0000 UTC m=+152.049723349 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.328323 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.328854 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.828823921 +0000 UTC m=+152.151705471 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.413392 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.429936 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.430356 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.930330109 +0000 UTC m=+152.253211659 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.431680 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.432353 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:56.932344666 +0000 UTC m=+152.255226216 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.434194 4760 patch_prober.go:28] interesting pod/router-default-5444994796-2m97z container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 17:05:56 crc kubenswrapper[4760]: [-]has-synced failed: reason withheld
Nov 24 17:05:56 crc kubenswrapper[4760]: [+]process-running ok
Nov 24 17:05:56 crc kubenswrapper[4760]: healthz check failed
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.434255 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2m97z" podUID="f4e5eb55-04d5-4d78-8c6e-73eb5233c269" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.525390 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26"
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.532130 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/893b8ae8-4ab4-474e-b6bc-ed926c279c44-secret-volume\") pod \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") "
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.532648 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.532747 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.532792 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/893b8ae8-4ab4-474e-b6bc-ed926c279c44-config-volume\") pod \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") "
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.532846 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrlwm\" (UniqueName: \"kubernetes.io/projected/893b8ae8-4ab4-474e-b6bc-ed926c279c44-kube-api-access-xrlwm\") pod \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\" (UID: \"893b8ae8-4ab4-474e-b6bc-ed926c279c44\") "
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.533426 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.033411841 +0000 UTC m=+152.356293391 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.534053 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/893b8ae8-4ab4-474e-b6bc-ed926c279c44-config-volume" (OuterVolumeSpecName: "config-volume") pod "893b8ae8-4ab4-474e-b6bc-ed926c279c44" (UID: "893b8ae8-4ab4-474e-b6bc-ed926c279c44"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.542341 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/893b8ae8-4ab4-474e-b6bc-ed926c279c44-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "893b8ae8-4ab4-474e-b6bc-ed926c279c44" (UID: "893b8ae8-4ab4-474e-b6bc-ed926c279c44"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.545383 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-jrf7z"
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.576803 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/893b8ae8-4ab4-474e-b6bc-ed926c279c44-kube-api-access-xrlwm" (OuterVolumeSpecName: "kube-api-access-xrlwm") pod "893b8ae8-4ab4-474e-b6bc-ed926c279c44" (UID: "893b8ae8-4ab4-474e-b6bc-ed926c279c44"). InnerVolumeSpecName "kube-api-access-xrlwm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.615052 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.634571 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.634716 4760 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/893b8ae8-4ab4-474e-b6bc-ed926c279c44-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.634735 4760 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/893b8ae8-4ab4-474e-b6bc-ed926c279c44-config-volume\") on node \"crc\" DevicePath \"\""
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.634745 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrlwm\" (UniqueName: \"kubernetes.io/projected/893b8ae8-4ab4-474e-b6bc-ed926c279c44-kube-api-access-xrlwm\") on node \"crc\" DevicePath \"\""
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.637973 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.137961216 +0000 UTC m=+152.460842766 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.742870 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.743325 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.243308792 +0000 UTC m=+152.566190342 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.848751 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.849087 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.349070921 +0000 UTC m=+152.671952471 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.949782 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.949949 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.449925961 +0000 UTC m=+152.772807511 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:56 crc kubenswrapper[4760]: I1124 17:05:56.950461 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:56 crc kubenswrapper[4760]: E1124 17:05:56.950793 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.450782305 +0000 UTC m=+152.773663865 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.041047 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-bqz55" event={"ID":"f49183f5-e6c6-4938-b02f-de17d2d16ecc","Type":"ContainerStarted","Data":"aabb3e28c24753bb1d4e2d40f79d3524316aa7c5a57ea52d2ee5b9ee6b22f8a3"}
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.043640 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a6b8490c-3eac-45d7-9ffe-d55abaae02d9","Type":"ContainerStarted","Data":"128e6a5486d4c4c5fe93ea0c30eaee004743de4568cef956655b0d04c5d3b81d"}
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.047365 4760 generic.go:334] "Generic (PLEG): container finished" podID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerID="c1d869b70341e0c3be4b6e9fbd51af9e109e891de7d10b29863a835c2ed7be2a" exitCode=0
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.047427 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6bpzv" event={"ID":"5013e377-ec2f-4559-afd3-d3aeab54b0ee","Type":"ContainerDied","Data":"c1d869b70341e0c3be4b6e9fbd51af9e109e891de7d10b29863a835c2ed7be2a"}
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.047446 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6bpzv" event={"ID":"5013e377-ec2f-4559-afd3-d3aeab54b0ee","Type":"ContainerStarted","Data":"20d7d9934235cb6b109b5353b70e028e52cd4d5667e3e779695491e12631f61b"}
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.051172 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.051401 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.551381637 +0000 UTC m=+152.874263187 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.051494 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.051775 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.551758118 +0000 UTC m=+152.874639668 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.054486 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv" event={"ID":"893b8ae8-4ab4-474e-b6bc-ed926c279c44","Type":"ContainerDied","Data":"d62bd2526b03b4f64ca182719af5d9ae3f9508c63c6eb248e6f08019734e9a9e"}
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.054519 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d62bd2526b03b4f64ca182719af5d9ae3f9508c63c6eb248e6f08019734e9a9e"
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.054604 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.152630 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.152826 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.652783262 +0000 UTC m=+152.975664812 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.154684 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.154917 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.654906752 +0000 UTC m=+152.977788302 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.257228 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.257648 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.757632425 +0000 UTC m=+153.080513975 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.359286 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.359723 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.859705949 +0000 UTC m=+153.182587499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.427494 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-2m97z"
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.430048 4760 patch_prober.go:28] interesting pod/router-default-5444994796-2m97z container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 17:05:57 crc kubenswrapper[4760]: [-]has-synced failed: reason withheld
Nov 24 17:05:57 crc kubenswrapper[4760]: [+]process-running ok
Nov 24 17:05:57 crc kubenswrapper[4760]: healthz check failed
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.430083 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2m97z" podUID="f4e5eb55-04d5-4d78-8c6e-73eb5233c269" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.460831 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.460971 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.960946419 +0000 UTC m=+153.283827969 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.461253 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.461572 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:57.961563947 +0000 UTC m=+153.284445497 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.562762 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.563449 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.063433205 +0000 UTC m=+153.386314755 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.606129 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-qr42v"
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.606959 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-qr42v"
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.608536 4760 patch_prober.go:28] interesting pod/console-f9d7485db-qr42v container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body=
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.608577 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-qr42v" podUID="4e26988e-e709-4bf3-81a3-8a4666e7e0da" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused"
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.669696 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.670723 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.170710727 +0000 UTC m=+153.493592277 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.684585 4760 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.774539 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.774704 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.274679364 +0000 UTC m=+153.597560914 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.775117 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.775515 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.275415615 +0000 UTC m=+153.598297165 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.887042 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.887371 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.387356859 +0000 UTC m=+153.710238409 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.985443 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.989063 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:57 crc kubenswrapper[4760]: E1124 17:05:57.989833 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.489816894 +0000 UTC m=+153.812698444 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:57 crc kubenswrapper[4760]: I1124 17:05:57.991576 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlpph"
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.090903 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:58 crc kubenswrapper[4760]: E1124 17:05:58.092379 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.592361151 +0000 UTC m=+153.915242711 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.100765 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-bqz55" event={"ID":"f49183f5-e6c6-4938-b02f-de17d2d16ecc","Type":"ContainerStarted","Data":"f0f04226813c87d0940cab74e8e9f72f93310dc1421a95e13e39420e2b496f4c"}
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.105686 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a6b8490c-3eac-45d7-9ffe-d55abaae02d9","Type":"ContainerDied","Data":"89f32ab0780f54b3291e1884f8c87fc88a932e93834cf1a0b11f59781099b03a"}
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.105779 4760 generic.go:334] "Generic (PLEG): container finished" podID="a6b8490c-3eac-45d7-9ffe-d55abaae02d9" containerID="89f32ab0780f54b3291e1884f8c87fc88a932e93834cf1a0b11f59781099b03a" exitCode=0
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.193476 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:58 crc kubenswrapper[4760]: E1124 17:05:58.193817 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.693804987 +0000 UTC m=+154.016686537 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.227712 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.227733 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.227759 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.227778 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.294996 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:58 crc kubenswrapper[4760]: E1124 17:05:58.295338 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.795323476 +0000 UTC m=+154.118205026 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.366744 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-sgkrm"
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.383744 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm"
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.396711 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:58 crc kubenswrapper[4760]: E1124 17:05:58.397021 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.896998258 +0000 UTC m=+154.219879808 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.431564 4760 patch_prober.go:28] interesting pod/router-default-5444994796-2m97z container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 17:05:58 crc kubenswrapper[4760]: [-]has-synced failed: reason withheld
Nov 24 17:05:58 crc kubenswrapper[4760]: [+]process-running ok
Nov 24 17:05:58 crc kubenswrapper[4760]: healthz check failed
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.431619 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2m97z" podUID="f4e5eb55-04d5-4d78-8c6e-73eb5233c269" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.497450 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:58 crc kubenswrapper[4760]: E1124 17:05:58.498565 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 17:05:58.998551448 +0000 UTC m=+154.321432988 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.603716 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:58 crc kubenswrapper[4760]: E1124 17:05:58.604182 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 17:05:59.104166592 +0000 UTC m=+154.427048142 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2qfzh" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.659477 4760 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-24T17:05:57.684624031Z","Handler":null,"Name":""}
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.664539 4760 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.664600 4760 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.673139 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-87zlw"
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.705824 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.711385 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.808827 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.859839 4760 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.859928 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:58 crc kubenswrapper[4760]: I1124 17:05:58.901091 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2qfzh\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.104272 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.120781 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-bqz55" event={"ID":"f49183f5-e6c6-4938-b02f-de17d2d16ecc","Type":"ContainerStarted","Data":"164ec31798ef1c7e3d6fe60875408b375bfaeb5c7852a90f8c04536328d97576"}
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.140889 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-bqz55" podStartSLOduration=14.140873139 podStartE2EDuration="14.140873139s" podCreationTimestamp="2025-11-24 17:05:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:05:59.136417983 +0000 UTC m=+154.459299533" watchObservedRunningTime="2025-11-24 17:05:59.140873139 +0000 UTC m=+154.463754689"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.424293 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.430771 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-2m97z"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.433513 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-2m97z"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.490081 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 24 17:05:59 crc kubenswrapper[4760]: W1124 17:05:59.490461 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28fd1340_959d_4b58_8ad7_c654176844e2.slice/crio-bf5bdd691afa594d270f670922b235fecc11fe251c82cc271ad4f91ad2ea67b1 WatchSource:0}: Error finding container bf5bdd691afa594d270f670922b235fecc11fe251c82cc271ad4f91ad2ea67b1: Status 404 returned error can't find the container with id bf5bdd691afa594d270f670922b235fecc11fe251c82cc271ad4f91ad2ea67b1
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.501585 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2qfzh"]
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.522541 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kube-api-access\") pod \"a6b8490c-3eac-45d7-9ffe-d55abaae02d9\" (UID: \"a6b8490c-3eac-45d7-9ffe-d55abaae02d9\") "
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.523208 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kubelet-dir\") pod \"a6b8490c-3eac-45d7-9ffe-d55abaae02d9\" (UID: \"a6b8490c-3eac-45d7-9ffe-d55abaae02d9\") "
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.523263 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a6b8490c-3eac-45d7-9ffe-d55abaae02d9" (UID: "a6b8490c-3eac-45d7-9ffe-d55abaae02d9"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.523952 4760 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.527754 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a6b8490c-3eac-45d7-9ffe-d55abaae02d9" (UID: "a6b8490c-3eac-45d7-9ffe-d55abaae02d9"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.626396 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a6b8490c-3eac-45d7-9ffe-d55abaae02d9-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.652937 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 24 17:05:59 crc kubenswrapper[4760]: E1124 17:05:59.653164 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b8490c-3eac-45d7-9ffe-d55abaae02d9" containerName="pruner"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.653177 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b8490c-3eac-45d7-9ffe-d55abaae02d9" containerName="pruner"
Nov 24 17:05:59 crc kubenswrapper[4760]: E1124 17:05:59.653199 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="893b8ae8-4ab4-474e-b6bc-ed926c279c44" containerName="collect-profiles"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.653205 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="893b8ae8-4ab4-474e-b6bc-ed926c279c44" containerName="collect-profiles"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.653293 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6b8490c-3eac-45d7-9ffe-d55abaae02d9" containerName="pruner"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.653302 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="893b8ae8-4ab4-474e-b6bc-ed926c279c44" containerName="collect-profiles"
Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.653664 4760 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.655655 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.656223 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.657919 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.727533 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0f7fe45c-96f1-488a-b95a-3adbbe5c6900\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.727564 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0f7fe45c-96f1-488a-b95a-3adbbe5c6900\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.830428 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0f7fe45c-96f1-488a-b95a-3adbbe5c6900\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.830813 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0f7fe45c-96f1-488a-b95a-3adbbe5c6900\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.830604 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0f7fe45c-96f1-488a-b95a-3adbbe5c6900\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:05:59 crc kubenswrapper[4760]: I1124 17:05:59.848673 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0f7fe45c-96f1-488a-b95a-3adbbe5c6900\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:06:00 crc kubenswrapper[4760]: I1124 17:06:00.033559 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:06:00 crc kubenswrapper[4760]: I1124 17:06:00.142852 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 17:06:00 crc kubenswrapper[4760]: I1124 17:06:00.142852 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a6b8490c-3eac-45d7-9ffe-d55abaae02d9","Type":"ContainerDied","Data":"128e6a5486d4c4c5fe93ea0c30eaee004743de4568cef956655b0d04c5d3b81d"} Nov 24 17:06:00 crc kubenswrapper[4760]: I1124 17:06:00.143130 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="128e6a5486d4c4c5fe93ea0c30eaee004743de4568cef956655b0d04c5d3b81d" Nov 24 17:06:00 crc kubenswrapper[4760]: I1124 17:06:00.145178 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" event={"ID":"28fd1340-959d-4b58-8ad7-c654176844e2","Type":"ContainerStarted","Data":"24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e"} Nov 24 17:06:00 crc kubenswrapper[4760]: I1124 17:06:00.145240 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" event={"ID":"28fd1340-959d-4b58-8ad7-c654176844e2","Type":"ContainerStarted","Data":"bf5bdd691afa594d270f670922b235fecc11fe251c82cc271ad4f91ad2ea67b1"} Nov 24 17:06:00 crc kubenswrapper[4760]: I1124 17:06:00.145289 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:06:00 crc kubenswrapper[4760]: I1124 17:06:00.184800 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" podStartSLOduration=134.184779885 podStartE2EDuration="2m14.184779885s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:06:00.169149892 +0000 UTC m=+155.492031452" watchObservedRunningTime="2025-11-24 17:06:00.184779885 +0000 UTC m=+155.507661435" Nov 24 17:06:00 crc kubenswrapper[4760]: I1124 17:06:00.517391 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-bkvpl" Nov 24 17:06:00 crc kubenswrapper[4760]: I1124 17:06:00.533478 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 17:06:00 crc kubenswrapper[4760]: W1124 17:06:00.578376 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod0f7fe45c_96f1_488a_b95a_3adbbe5c6900.slice/crio-d8f2555b01ba957274007d7b581fca3be7ee139397006f83f1616f1ff0ddd26c WatchSource:0}: Error finding container d8f2555b01ba957274007d7b581fca3be7ee139397006f83f1616f1ff0ddd26c: Status 404 returned error can't find the container with id d8f2555b01ba957274007d7b581fca3be7ee139397006f83f1616f1ff0ddd26c Nov 24 17:06:01 crc kubenswrapper[4760]: I1124 17:06:01.192926 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0f7fe45c-96f1-488a-b95a-3adbbe5c6900","Type":"ContainerStarted","Data":"d8f2555b01ba957274007d7b581fca3be7ee139397006f83f1616f1ff0ddd26c"} Nov 24 17:06:02 crc kubenswrapper[4760]: I1124 17:06:02.202439 4760 generic.go:334] "Generic (PLEG): container finished" podID="0f7fe45c-96f1-488a-b95a-3adbbe5c6900" containerID="9842a81721b814a988d88a078c60fee12865cb21da3ca33380e12101ba99b9b8" exitCode=0 Nov 24 17:06:02 crc 
kubenswrapper[4760]: I1124 17:06:02.202481 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0f7fe45c-96f1-488a-b95a-3adbbe5c6900","Type":"ContainerDied","Data":"9842a81721b814a988d88a078c60fee12865cb21da3ca33380e12101ba99b9b8"} Nov 24 17:06:05 crc kubenswrapper[4760]: I1124 17:06:05.642558 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:06:05 crc kubenswrapper[4760]: I1124 17:06:05.643184 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:06:07 crc kubenswrapper[4760]: I1124 17:06:07.617385 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:06:07 crc kubenswrapper[4760]: I1124 17:06:07.620838 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:06:08 crc kubenswrapper[4760]: I1124 17:06:08.228472 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:08 crc kubenswrapper[4760]: I1124 17:06:08.228549 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:08 crc kubenswrapper[4760]: I1124 17:06:08.228671 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:08 crc kubenswrapper[4760]: I1124 17:06:08.228752 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:08 crc kubenswrapper[4760]: I1124 17:06:08.874828 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:06:08 crc kubenswrapper[4760]: I1124 17:06:08.888084 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e462626d-5645-4be7-89b4-383a4cde08f9-metrics-certs\") pod \"network-metrics-daemon-dz6vg\" (UID: \"e462626d-5645-4be7-89b4-383a4cde08f9\") " 
pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:06:09 crc kubenswrapper[4760]: I1124 17:06:09.092560 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-dz6vg" Nov 24 17:06:09 crc kubenswrapper[4760]: I1124 17:06:09.599688 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:06:09 crc kubenswrapper[4760]: I1124 17:06:09.688166 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kubelet-dir\") pod \"0f7fe45c-96f1-488a-b95a-3adbbe5c6900\" (UID: \"0f7fe45c-96f1-488a-b95a-3adbbe5c6900\") " Nov 24 17:06:09 crc kubenswrapper[4760]: I1124 17:06:09.688309 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kube-api-access\") pod \"0f7fe45c-96f1-488a-b95a-3adbbe5c6900\" (UID: \"0f7fe45c-96f1-488a-b95a-3adbbe5c6900\") " Nov 24 17:06:09 crc kubenswrapper[4760]: I1124 17:06:09.690068 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "0f7fe45c-96f1-488a-b95a-3adbbe5c6900" (UID: "0f7fe45c-96f1-488a-b95a-3adbbe5c6900"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:06:09 crc kubenswrapper[4760]: I1124 17:06:09.692971 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0f7fe45c-96f1-488a-b95a-3adbbe5c6900" (UID: "0f7fe45c-96f1-488a-b95a-3adbbe5c6900"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:06:09 crc kubenswrapper[4760]: I1124 17:06:09.790968 4760 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:09 crc kubenswrapper[4760]: I1124 17:06:09.791041 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0f7fe45c-96f1-488a-b95a-3adbbe5c6900-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:10 crc kubenswrapper[4760]: I1124 17:06:10.268988 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0f7fe45c-96f1-488a-b95a-3adbbe5c6900","Type":"ContainerDied","Data":"d8f2555b01ba957274007d7b581fca3be7ee139397006f83f1616f1ff0ddd26c"} Nov 24 17:06:10 crc kubenswrapper[4760]: I1124 17:06:10.269381 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8f2555b01ba957274007d7b581fca3be7ee139397006f83f1616f1ff0ddd26c" Nov 24 17:06:10 crc kubenswrapper[4760]: I1124 17:06:10.269528 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 17:06:18 crc kubenswrapper[4760]: I1124 17:06:18.228164 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:18 crc kubenswrapper[4760]: I1124 17:06:18.228179 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:18 crc kubenswrapper[4760]: I1124 17:06:18.228500 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:18 crc kubenswrapper[4760]: I1124 17:06:18.228554 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:18 crc kubenswrapper[4760]: I1124 17:06:18.228627 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-2stcx" Nov 24 17:06:18 crc kubenswrapper[4760]: I1124 17:06:18.229173 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:18 crc kubenswrapper[4760]: I1124 17:06:18.229219 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:18 crc kubenswrapper[4760]: I1124 17:06:18.229655 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"5e128b860811efc3c16d4dec3ff60f46858e0545f8bd27d59cd53e37e51dba38"} pod="openshift-console/downloads-7954f5f757-2stcx" containerMessage="Container download-server failed liveness probe, will be restarted" Nov 24 17:06:18 crc kubenswrapper[4760]: I1124 17:06:18.229776 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" containerID="cri-o://5e128b860811efc3c16d4dec3ff60f46858e0545f8bd27d59cd53e37e51dba38" gracePeriod=2 Nov 24 17:06:19 crc kubenswrapper[4760]: I1124 17:06:19.115771 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:06:25 crc kubenswrapper[4760]: E1124 17:06:25.960250 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: 
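The repeated "connect: connection refused" failures above are ordinary HTTP GETs that the kubelet prober issues against the container's probe endpoint; after enough consecutive liveness failures the container is killed and restarted, as the kuberuntime entries show. A minimal Go sketch of such a probe-style check, assuming an arbitrary URL and timeout; this is an illustration, not kubelet's prober code:

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    // probeHTTP performs one liveness/readiness-style check: GET the URL
    // and treat any transport error or non-2xx status as failure, the
    // same failure mode logged as "Probe failed ... connection refused".
    func probeHTTP(url string, timeout time.Duration) error {
    	client := &http.Client{Timeout: timeout}
    	resp, err := client.Get(url)
    	if err != nil {
    		return fmt.Errorf("probe failed: %w", err) // e.g. connection refused
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
    		return fmt.Errorf("probe failed: status %d", resp.StatusCode)
    	}
    	return nil
    }

    func main() {
    	// Against a port with no listener this prints a "connection
    	// refused" error, matching the download-server probe output.
    	if err := probeHTTP("http://127.0.0.1:8798/health", time.Second); err != nil {
    		fmt.Println(err)
    	} else {
    		fmt.Println("probe ok")
    	}
    }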
context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 24 17:06:25 crc kubenswrapper[4760]: E1124 17:06:25.960475 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ppj2p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dffm4_openshift-marketplace(0d6c1b39-d49f-49ab-bfa7-c28657529520): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:06:25 crc kubenswrapper[4760]: E1124 17:06:25.961737 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-dffm4" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" Nov 24 17:06:28 crc kubenswrapper[4760]: I1124 17:06:28.228740 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:28 crc kubenswrapper[4760]: I1124 17:06:28.229201 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:28 crc kubenswrapper[4760]: I1124 17:06:28.395128 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7w7t" Nov 24 17:06:30 crc kubenswrapper[4760]: I1124 17:06:30.412450 4760 generic.go:334] "Generic (PLEG): container finished" podID="1821fc95-952c-44e2-9d50-5458327620e9" 
containerID="5e128b860811efc3c16d4dec3ff60f46858e0545f8bd27d59cd53e37e51dba38" exitCode=0 Nov 24 17:06:30 crc kubenswrapper[4760]: I1124 17:06:30.412538 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2stcx" event={"ID":"1821fc95-952c-44e2-9d50-5458327620e9","Type":"ContainerDied","Data":"5e128b860811efc3c16d4dec3ff60f46858e0545f8bd27d59cd53e37e51dba38"} Nov 24 17:06:30 crc kubenswrapper[4760]: E1124 17:06:30.882105 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-dffm4" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" Nov 24 17:06:32 crc kubenswrapper[4760]: E1124 17:06:32.435426 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 24 17:06:32 crc kubenswrapper[4760]: E1124 17:06:32.435730 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x8pmp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-k84dx_openshift-marketplace(1423e3b0-3691-49ce-b29d-0f838db4ce3e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:06:32 crc kubenswrapper[4760]: E1124 17:06:32.437119 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-k84dx" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" Nov 24 17:06:32 crc kubenswrapper[4760]: E1124 17:06:32.444145 4760 log.go:32] "PullImage from image service failed" err="rpc error: 
code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 24 17:06:32 crc kubenswrapper[4760]: E1124 17:06:32.444256 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g8vsh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-psr87_openshift-marketplace(80110199-7935-43a8-9025-f048cc22defb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:06:32 crc kubenswrapper[4760]: E1124 17:06:32.445691 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-psr87" podUID="80110199-7935-43a8-9025-f048cc22defb" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.474657 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-k84dx" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.474665 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-psr87" podUID="80110199-7935-43a8-9025-f048cc22defb" Nov 24 17:06:34 crc kubenswrapper[4760]: I1124 17:06:34.493987 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.558501 4760 
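In these entries ErrImagePull marks the failed pull attempt itself, while ImagePullBackOff is the widening wait the pod spends between attempts before the pull is retried. A rough Go sketch of that kind of schedule; the 10s initial delay and 5m cap are assumptions for illustration, not kubelet's or CRI-O's configured values:

    package main

    import (
    	"fmt"
    	"time"
    )

    // backoffAfter returns how long a pod would sit in ImagePullBackOff
    // after n consecutive pull failures: an initial delay doubled per
    // additional failure, capped at maxDelay.
    func backoffAfter(n int, initial, maxDelay time.Duration) time.Duration {
    	d := initial
    	for i := 1; i < n; i++ {
    		d *= 2
    		if d >= maxDelay {
    			return maxDelay
    		}
    	}
    	return d
    }

    func main() {
    	// A pull that keeps failing with "context canceled" walks a
    	// schedule of this shape between attempts.
    	for n := 1; n <= 6; n++ {
    		fmt.Printf("failure %d -> back off %v\n",
    			n, backoffAfter(n, 10*time.Second, 5*time.Minute))
    	}
    }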
log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.558648 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b9s8z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-bsld7_openshift-marketplace(cad1e7d4-527f-4dc5-831c-3eaa397c510c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.560231 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-bsld7" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.630143 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.630483 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fmcq6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-xrzxz_openshift-marketplace(443db8f4-7e0f-498f-9602-c93d1086f2cb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.631801 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-xrzxz" podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.668272 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.668398 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jdfxv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-l8l4r_openshift-marketplace(e3baee90-4b85-4f85-a756-67dcc7fb373a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.669526 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-l8l4r" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.701483 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.701559 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kd72g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-l96mk_openshift-marketplace(1d5f4395-aa76-4909-9736-9f67f65b9125): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 17:06:34 crc kubenswrapper[4760]: E1124 17:06:34.702723 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-l96mk" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" Nov 24 17:06:34 crc kubenswrapper[4760]: I1124 17:06:34.902480 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-dz6vg"] Nov 24 17:06:34 crc kubenswrapper[4760]: W1124 17:06:34.909178 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode462626d_5645_4be7_89b4_383a4cde08f9.slice/crio-88c2c159b6bd4b00a3df6619fab4d242f3af1f5ad31772c5d142b6d68d937d22 WatchSource:0}: Error finding container 88c2c159b6bd4b00a3df6619fab4d242f3af1f5ad31772c5d142b6d68d937d22: Status 404 returned error can't find the container with id 88c2c159b6bd4b00a3df6619fab4d242f3af1f5ad31772c5d142b6d68d937d22 Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.439459 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" event={"ID":"e462626d-5645-4be7-89b4-383a4cde08f9","Type":"ContainerStarted","Data":"44e4ec3a36245df20df22798f83d568c82b16b2e3c1fd998105c9ab690fc3f54"} Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.439809 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" event={"ID":"e462626d-5645-4be7-89b4-383a4cde08f9","Type":"ContainerStarted","Data":"fe62818e0697339ea5b7e5f0f595f67af82511a1e0a2c3e04afee9f6dbb844b4"} Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.439819 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-dz6vg" 
event={"ID":"e462626d-5645-4be7-89b4-383a4cde08f9","Type":"ContainerStarted","Data":"88c2c159b6bd4b00a3df6619fab4d242f3af1f5ad31772c5d142b6d68d937d22"} Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.441895 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2stcx" event={"ID":"1821fc95-952c-44e2-9d50-5458327620e9","Type":"ContainerStarted","Data":"b67b44d4f4f71fe8a3520e80df0399c488335b0c3a872a7d939462ebaa5763a4"} Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.442057 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-2stcx" Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.442065 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.442451 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.443254 4760 generic.go:334] "Generic (PLEG): container finished" podID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerID="41ccd82806cf6322db0b844f1afc1d823fa3e981b68f184cd456b4f09dd3d509" exitCode=0 Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.443369 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6bpzv" event={"ID":"5013e377-ec2f-4559-afd3-d3aeab54b0ee","Type":"ContainerDied","Data":"41ccd82806cf6322db0b844f1afc1d823fa3e981b68f184cd456b4f09dd3d509"} Nov 24 17:06:35 crc kubenswrapper[4760]: E1124 17:06:35.444358 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-xrzxz" podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" Nov 24 17:06:35 crc kubenswrapper[4760]: E1124 17:06:35.444971 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-bsld7" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" Nov 24 17:06:35 crc kubenswrapper[4760]: E1124 17:06:35.445056 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-l96mk" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" Nov 24 17:06:35 crc kubenswrapper[4760]: E1124 17:06:35.445622 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-l8l4r" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 
17:06:35.455929 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-dz6vg" podStartSLOduration=169.45591076 podStartE2EDuration="2m49.45591076s" podCreationTimestamp="2025-11-24 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:06:35.453746439 +0000 UTC m=+190.776627989" watchObservedRunningTime="2025-11-24 17:06:35.45591076 +0000 UTC m=+190.778792310" Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.642119 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:06:35 crc kubenswrapper[4760]: I1124 17:06:35.642173 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:06:36 crc kubenswrapper[4760]: I1124 17:06:36.476079 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6bpzv" event={"ID":"5013e377-ec2f-4559-afd3-d3aeab54b0ee","Type":"ContainerStarted","Data":"b7f0ba7608295b14f45697fe7d7b398b8102d09256ede2657c74dd54cd134244"} Nov 24 17:06:36 crc kubenswrapper[4760]: I1124 17:06:36.476637 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:36 crc kubenswrapper[4760]: I1124 17:06:36.477230 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:36 crc kubenswrapper[4760]: I1124 17:06:36.503586 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6bpzv" podStartSLOduration=3.681022031 podStartE2EDuration="41.503559394s" podCreationTimestamp="2025-11-24 17:05:55 +0000 UTC" firstStartedPulling="2025-11-24 17:05:58.114870069 +0000 UTC m=+153.437751619" lastFinishedPulling="2025-11-24 17:06:35.937407422 +0000 UTC m=+191.260288982" observedRunningTime="2025-11-24 17:06:36.500449775 +0000 UTC m=+191.823331345" watchObservedRunningTime="2025-11-24 17:06:36.503559394 +0000 UTC m=+191.826440974" Nov 24 17:06:37 crc kubenswrapper[4760]: I1124 17:06:37.482839 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:37 crc kubenswrapper[4760]: I1124 17:06:37.482903 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get 
\"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:38 crc kubenswrapper[4760]: I1124 17:06:38.227701 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:38 crc kubenswrapper[4760]: I1124 17:06:38.228266 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:38 crc kubenswrapper[4760]: I1124 17:06:38.227868 4760 patch_prober.go:28] interesting pod/downloads-7954f5f757-2stcx container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 24 17:06:38 crc kubenswrapper[4760]: I1124 17:06:38.228390 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2stcx" podUID="1821fc95-952c-44e2-9d50-5458327620e9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 24 17:06:43 crc kubenswrapper[4760]: I1124 17:06:43.517332 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dffm4" event={"ID":"0d6c1b39-d49f-49ab-bfa7-c28657529520","Type":"ContainerStarted","Data":"088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc"} Nov 24 17:06:44 crc kubenswrapper[4760]: I1124 17:06:44.523147 4760 generic.go:334] "Generic (PLEG): container finished" podID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerID="088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc" exitCode=0 Nov 24 17:06:44 crc kubenswrapper[4760]: I1124 17:06:44.523215 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dffm4" event={"ID":"0d6c1b39-d49f-49ab-bfa7-c28657529520","Type":"ContainerDied","Data":"088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc"} Nov 24 17:06:45 crc kubenswrapper[4760]: I1124 17:06:45.530900 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dffm4" event={"ID":"0d6c1b39-d49f-49ab-bfa7-c28657529520","Type":"ContainerStarted","Data":"03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a"} Nov 24 17:06:45 crc kubenswrapper[4760]: I1124 17:06:45.548470 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dffm4" podStartSLOduration=2.497928619 podStartE2EDuration="51.548451763s" podCreationTimestamp="2025-11-24 17:05:54 +0000 UTC" firstStartedPulling="2025-11-24 17:05:56.033716144 +0000 UTC m=+151.356597694" lastFinishedPulling="2025-11-24 17:06:45.084239258 +0000 UTC m=+200.407120838" observedRunningTime="2025-11-24 17:06:45.546404964 +0000 UTC m=+200.869286534" watchObservedRunningTime="2025-11-24 17:06:45.548451763 +0000 UTC m=+200.871333323" Nov 24 17:06:45 crc kubenswrapper[4760]: I1124 17:06:45.608372 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6bpzv" Nov 24 17:06:45 crc kubenswrapper[4760]: 
Nov 24 17:06:45 crc kubenswrapper[4760]: I1124 17:06:45.771502 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6bpzv"
Nov 24 17:06:46 crc kubenswrapper[4760]: I1124 17:06:46.576066 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6bpzv"
Nov 24 17:06:47 crc kubenswrapper[4760]: I1124 17:06:47.541128 4760 generic.go:334] "Generic (PLEG): container finished" podID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerID="bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc" exitCode=0
Nov 24 17:06:47 crc kubenswrapper[4760]: I1124 17:06:47.541193 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k84dx" event={"ID":"1423e3b0-3691-49ce-b29d-0f838db4ce3e","Type":"ContainerDied","Data":"bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc"}
Nov 24 17:06:48 crc kubenswrapper[4760]: I1124 17:06:48.250388 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-2stcx"
Nov 24 17:06:48 crc kubenswrapper[4760]: I1124 17:06:48.550047 4760 generic.go:334] "Generic (PLEG): container finished" podID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerID="0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b" exitCode=0
Nov 24 17:06:48 crc kubenswrapper[4760]: I1124 17:06:48.550089 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xrzxz" event={"ID":"443db8f4-7e0f-498f-9602-c93d1086f2cb","Type":"ContainerDied","Data":"0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b"}
Nov 24 17:06:48 crc kubenswrapper[4760]: I1124 17:06:48.959863 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6bpzv"]
Nov 24 17:06:48 crc kubenswrapper[4760]: I1124 17:06:48.960082 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6bpzv" podUID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerName="registry-server" containerID="cri-o://b7f0ba7608295b14f45697fe7d7b398b8102d09256ede2657c74dd54cd134244" gracePeriod=2
Nov 24 17:06:49 crc kubenswrapper[4760]: I1124 17:06:49.558612 4760 generic.go:334] "Generic (PLEG): container finished" podID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerID="b7f0ba7608295b14f45697fe7d7b398b8102d09256ede2657c74dd54cd134244" exitCode=0
Nov 24 17:06:49 crc kubenswrapper[4760]: I1124 17:06:49.558657 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6bpzv" event={"ID":"5013e377-ec2f-4559-afd3-d3aeab54b0ee","Type":"ContainerDied","Data":"b7f0ba7608295b14f45697fe7d7b398b8102d09256ede2657c74dd54cd134244"}
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.245958 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6bpzv"
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.391549 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-catalog-content\") pod \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") "
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.391903 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlvwp\" (UniqueName: \"kubernetes.io/projected/5013e377-ec2f-4559-afd3-d3aeab54b0ee-kube-api-access-rlvwp\") pod \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") "
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.391970 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-utilities\") pod \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\" (UID: \"5013e377-ec2f-4559-afd3-d3aeab54b0ee\") "
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.392498 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-utilities" (OuterVolumeSpecName: "utilities") pod "5013e377-ec2f-4559-afd3-d3aeab54b0ee" (UID: "5013e377-ec2f-4559-afd3-d3aeab54b0ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.397620 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5013e377-ec2f-4559-afd3-d3aeab54b0ee-kube-api-access-rlvwp" (OuterVolumeSpecName: "kube-api-access-rlvwp") pod "5013e377-ec2f-4559-afd3-d3aeab54b0ee" (UID: "5013e377-ec2f-4559-afd3-d3aeab54b0ee"). InnerVolumeSpecName "kube-api-access-rlvwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.493304 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlvwp\" (UniqueName: \"kubernetes.io/projected/5013e377-ec2f-4559-afd3-d3aeab54b0ee-kube-api-access-rlvwp\") on node \"crc\" DevicePath \"\""
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.493350 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.499697 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5013e377-ec2f-4559-afd3-d3aeab54b0ee" (UID: "5013e377-ec2f-4559-afd3-d3aeab54b0ee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.567450 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6bpzv" event={"ID":"5013e377-ec2f-4559-afd3-d3aeab54b0ee","Type":"ContainerDied","Data":"20d7d9934235cb6b109b5353b70e028e52cd4d5667e3e779695491e12631f61b"}
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.567507 4760 scope.go:117] "RemoveContainer" containerID="b7f0ba7608295b14f45697fe7d7b398b8102d09256ede2657c74dd54cd134244"
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.569107 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6bpzv"
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.596925 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5013e377-ec2f-4559-afd3-d3aeab54b0ee-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.618090 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6bpzv"]
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.625547 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6bpzv"]
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.660446 4760 scope.go:117] "RemoveContainer" containerID="41ccd82806cf6322db0b844f1afc1d823fa3e981b68f184cd456b4f09dd3d509"
Nov 24 17:06:50 crc kubenswrapper[4760]: I1124 17:06:50.677983 4760 scope.go:117] "RemoveContainer" containerID="c1d869b70341e0c3be4b6e9fbd51af9e109e891de7d10b29863a835c2ed7be2a"
Nov 24 17:06:51 crc kubenswrapper[4760]: I1124 17:06:51.472653 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" path="/var/lib/kubelet/pods/5013e377-ec2f-4559-afd3-d3aeab54b0ee/volumes"
Nov 24 17:06:51 crc kubenswrapper[4760]: I1124 17:06:51.575182 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xrzxz" event={"ID":"443db8f4-7e0f-498f-9602-c93d1086f2cb","Type":"ContainerStarted","Data":"16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952"}
Nov 24 17:06:52 crc kubenswrapper[4760]: I1124 17:06:52.584110 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k84dx" event={"ID":"1423e3b0-3691-49ce-b29d-0f838db4ce3e","Type":"ContainerStarted","Data":"7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727"}
Nov 24 17:06:52 crc kubenswrapper[4760]: I1124 17:06:52.604100 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xrzxz" podStartSLOduration=4.883293393 podStartE2EDuration="1m1.604084701s" podCreationTimestamp="2025-11-24 17:05:51 +0000 UTC" firstStartedPulling="2025-11-24 17:05:53.939810968 +0000 UTC m=+149.262692518" lastFinishedPulling="2025-11-24 17:06:50.660602276 +0000 UTC m=+205.983483826" observedRunningTime="2025-11-24 17:06:52.601110195 +0000 UTC m=+207.923991745" watchObservedRunningTime="2025-11-24 17:06:52.604084701 +0000 UTC m=+207.926966251"
Nov 24 17:06:52 crc kubenswrapper[4760]: I1124 17:06:52.623302 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k84dx" podStartSLOduration=4.908306013 podStartE2EDuration="1m0.623284804s" podCreationTimestamp="2025-11-24 17:05:52 +0000
UTC" firstStartedPulling="2025-11-24 17:05:53.944343796 +0000 UTC m=+149.267225346" lastFinishedPulling="2025-11-24 17:06:49.659322587 +0000 UTC m=+204.982204137" observedRunningTime="2025-11-24 17:06:52.62105892 +0000 UTC m=+207.943940470" watchObservedRunningTime="2025-11-24 17:06:52.623284804 +0000 UTC m=+207.946166354" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.245235 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bmf26"] Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.245487 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" podUID="dd6a2056-7948-4823-bb36-f9e650d649db" containerName="controller-manager" containerID="cri-o://803b1d9402e56cff176635ddfa7a776a9b21aa8e15f15a67ab430534e4dd3e9e" gracePeriod=30 Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.330570 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"] Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.330799 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" podUID="d5dec400-42dd-4869-a1eb-233e55cc120f" containerName="route-controller-manager" containerID="cri-o://c8c6c3a48704104fa59028022238086fcc9d1701d6696e775af5b945b28fd3de" gracePeriod=30 Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.590522 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l96mk" event={"ID":"1d5f4395-aa76-4909-9736-9f67f65b9125","Type":"ContainerStarted","Data":"f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb"} Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.593106 4760 generic.go:334] "Generic (PLEG): container finished" podID="dd6a2056-7948-4823-bb36-f9e650d649db" containerID="803b1d9402e56cff176635ddfa7a776a9b21aa8e15f15a67ab430534e4dd3e9e" exitCode=0 Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.593150 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" event={"ID":"dd6a2056-7948-4823-bb36-f9e650d649db","Type":"ContainerDied","Data":"803b1d9402e56cff176635ddfa7a776a9b21aa8e15f15a67ab430534e4dd3e9e"} Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.594992 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8l4r" event={"ID":"e3baee90-4b85-4f85-a756-67dcc7fb373a","Type":"ContainerStarted","Data":"b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc"} Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.597185 4760 generic.go:334] "Generic (PLEG): container finished" podID="d5dec400-42dd-4869-a1eb-233e55cc120f" containerID="c8c6c3a48704104fa59028022238086fcc9d1701d6696e775af5b945b28fd3de" exitCode=0 Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.597260 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" event={"ID":"d5dec400-42dd-4869-a1eb-233e55cc120f","Type":"ContainerDied","Data":"c8c6c3a48704104fa59028022238086fcc9d1701d6696e775af5b945b28fd3de"} Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.663172 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.750339 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-proxy-ca-bundles\") pod \"dd6a2056-7948-4823-bb36-f9e650d649db\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.750392 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htpzm\" (UniqueName: \"kubernetes.io/projected/dd6a2056-7948-4823-bb36-f9e650d649db-kube-api-access-htpzm\") pod \"dd6a2056-7948-4823-bb36-f9e650d649db\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.750492 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd6a2056-7948-4823-bb36-f9e650d649db-serving-cert\") pod \"dd6a2056-7948-4823-bb36-f9e650d649db\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.750555 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-client-ca\") pod \"dd6a2056-7948-4823-bb36-f9e650d649db\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.750597 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-config\") pod \"dd6a2056-7948-4823-bb36-f9e650d649db\" (UID: \"dd6a2056-7948-4823-bb36-f9e650d649db\") " Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.751068 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "dd6a2056-7948-4823-bb36-f9e650d649db" (UID: "dd6a2056-7948-4823-bb36-f9e650d649db"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.751490 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-client-ca" (OuterVolumeSpecName: "client-ca") pod "dd6a2056-7948-4823-bb36-f9e650d649db" (UID: "dd6a2056-7948-4823-bb36-f9e650d649db"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.751515 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-config" (OuterVolumeSpecName: "config") pod "dd6a2056-7948-4823-bb36-f9e650d649db" (UID: "dd6a2056-7948-4823-bb36-f9e650d649db"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.751919 4760 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.751945 4760 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.751957 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd6a2056-7948-4823-bb36-f9e650d649db-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.756939 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd6a2056-7948-4823-bb36-f9e650d649db-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "dd6a2056-7948-4823-bb36-f9e650d649db" (UID: "dd6a2056-7948-4823-bb36-f9e650d649db"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.756975 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd6a2056-7948-4823-bb36-f9e650d649db-kube-api-access-htpzm" (OuterVolumeSpecName: "kube-api-access-htpzm") pod "dd6a2056-7948-4823-bb36-f9e650d649db" (UID: "dd6a2056-7948-4823-bb36-f9e650d649db"). InnerVolumeSpecName "kube-api-access-htpzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.761491 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.852893 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-config\") pod \"d5dec400-42dd-4869-a1eb-233e55cc120f\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.852965 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-client-ca\") pod \"d5dec400-42dd-4869-a1eb-233e55cc120f\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.853063 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgj9m\" (UniqueName: \"kubernetes.io/projected/d5dec400-42dd-4869-a1eb-233e55cc120f-kube-api-access-vgj9m\") pod \"d5dec400-42dd-4869-a1eb-233e55cc120f\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.853106 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5dec400-42dd-4869-a1eb-233e55cc120f-serving-cert\") pod \"d5dec400-42dd-4869-a1eb-233e55cc120f\" (UID: \"d5dec400-42dd-4869-a1eb-233e55cc120f\") " Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.854060 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htpzm\" (UniqueName: \"kubernetes.io/projected/dd6a2056-7948-4823-bb36-f9e650d649db-kube-api-access-htpzm\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.854098 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd6a2056-7948-4823-bb36-f9e650d649db-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.854654 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-config" (OuterVolumeSpecName: "config") pod "d5dec400-42dd-4869-a1eb-233e55cc120f" (UID: "d5dec400-42dd-4869-a1eb-233e55cc120f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.854723 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-client-ca" (OuterVolumeSpecName: "client-ca") pod "d5dec400-42dd-4869-a1eb-233e55cc120f" (UID: "d5dec400-42dd-4869-a1eb-233e55cc120f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.857610 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5dec400-42dd-4869-a1eb-233e55cc120f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d5dec400-42dd-4869-a1eb-233e55cc120f" (UID: "d5dec400-42dd-4869-a1eb-233e55cc120f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.858870 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5dec400-42dd-4869-a1eb-233e55cc120f-kube-api-access-vgj9m" (OuterVolumeSpecName: "kube-api-access-vgj9m") pod "d5dec400-42dd-4869-a1eb-233e55cc120f" (UID: "d5dec400-42dd-4869-a1eb-233e55cc120f"). InnerVolumeSpecName "kube-api-access-vgj9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.955473 4760 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.955511 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgj9m\" (UniqueName: \"kubernetes.io/projected/d5dec400-42dd-4869-a1eb-233e55cc120f-kube-api-access-vgj9m\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.955523 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5dec400-42dd-4869-a1eb-233e55cc120f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:53 crc kubenswrapper[4760]: I1124 17:06:53.955533 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5dec400-42dd-4869-a1eb-233e55cc120f-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.528714 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6b68577b7b-dn2gh"] Nov 24 17:06:54 crc kubenswrapper[4760]: E1124 17:06:54.530881 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5dec400-42dd-4869-a1eb-233e55cc120f" containerName="route-controller-manager" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.531155 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5dec400-42dd-4869-a1eb-233e55cc120f" containerName="route-controller-manager" Nov 24 17:06:54 crc kubenswrapper[4760]: E1124 17:06:54.531180 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f7fe45c-96f1-488a-b95a-3adbbe5c6900" containerName="pruner" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.531189 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f7fe45c-96f1-488a-b95a-3adbbe5c6900" containerName="pruner" Nov 24 17:06:54 crc kubenswrapper[4760]: E1124 17:06:54.531201 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerName="registry-server" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.531210 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerName="registry-server" Nov 24 17:06:54 crc kubenswrapper[4760]: E1124 17:06:54.531233 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerName="extract-content" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.531240 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerName="extract-content" Nov 24 17:06:54 crc kubenswrapper[4760]: E1124 17:06:54.531256 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerName="extract-utilities" Nov 
24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.531265 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerName="extract-utilities" Nov 24 17:06:54 crc kubenswrapper[4760]: E1124 17:06:54.531276 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd6a2056-7948-4823-bb36-f9e650d649db" containerName="controller-manager" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.531284 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd6a2056-7948-4823-bb36-f9e650d649db" containerName="controller-manager" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.531446 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5dec400-42dd-4869-a1eb-233e55cc120f" containerName="route-controller-manager" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.531478 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f7fe45c-96f1-488a-b95a-3adbbe5c6900" containerName="pruner" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.531497 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd6a2056-7948-4823-bb36-f9e650d649db" containerName="controller-manager" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.531510 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="5013e377-ec2f-4559-afd3-d3aeab54b0ee" containerName="registry-server" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.532159 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.533585 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt"] Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.534400 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.551324 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6b68577b7b-dn2gh"] Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.559947 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt"] Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.561975 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glv2m\" (UniqueName: \"kubernetes.io/projected/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-kube-api-access-glv2m\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.562062 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cfh5\" (UniqueName: \"kubernetes.io/projected/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-kube-api-access-5cfh5\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.562099 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-client-ca\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.562163 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-config\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.562193 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-proxy-ca-bundles\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.562215 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-config\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.562240 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-serving-cert\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " 
pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.562262 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-client-ca\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.562294 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-serving-cert\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.605258 4760 generic.go:334] "Generic (PLEG): container finished" podID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerID="b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc" exitCode=0 Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.605343 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8l4r" event={"ID":"e3baee90-4b85-4f85-a756-67dcc7fb373a","Type":"ContainerDied","Data":"b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc"} Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.607483 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" event={"ID":"d5dec400-42dd-4869-a1eb-233e55cc120f","Type":"ContainerDied","Data":"a865858490d9de76c4d230483388da6fa91c5d86a09f15bf5778d120b246f106"} Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.607605 4760 scope.go:117] "RemoveContainer" containerID="c8c6c3a48704104fa59028022238086fcc9d1701d6696e775af5b945b28fd3de" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.608081 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.612948 4760 generic.go:334] "Generic (PLEG): container finished" podID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerID="2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f" exitCode=0 Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.613669 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsld7" event={"ID":"cad1e7d4-527f-4dc5-831c-3eaa397c510c","Type":"ContainerDied","Data":"2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f"} Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.617838 4760 generic.go:334] "Generic (PLEG): container finished" podID="80110199-7935-43a8-9025-f048cc22defb" containerID="394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44" exitCode=0 Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.617899 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psr87" event={"ID":"80110199-7935-43a8-9025-f048cc22defb","Type":"ContainerDied","Data":"394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44"} Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.622965 4760 generic.go:334] "Generic (PLEG): container finished" podID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerID="f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb" exitCode=0 Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.623065 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l96mk" event={"ID":"1d5f4395-aa76-4909-9736-9f67f65b9125","Type":"ContainerDied","Data":"f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb"} Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.630182 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" event={"ID":"dd6a2056-7948-4823-bb36-f9e650d649db","Type":"ContainerDied","Data":"fc33cf651d95b3101f872fdf850d2ab4b67e1067e714942659eee0a24dc2f180"} Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.630375 4760 scope.go:117] "RemoveContainer" containerID="803b1d9402e56cff176635ddfa7a776a9b21aa8e15f15a67ab430534e4dd3e9e" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.631456 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-bmf26" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.663280 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cfh5\" (UniqueName: \"kubernetes.io/projected/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-kube-api-access-5cfh5\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.663532 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-client-ca\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.663948 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-config\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.664098 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-proxy-ca-bundles\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.664809 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-config\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.664932 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-serving-cert\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.665068 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-client-ca\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.665787 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-config\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.665790 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-serving-cert\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.665937 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glv2m\" (UniqueName: \"kubernetes.io/projected/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-kube-api-access-glv2m\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.664527 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-client-ca\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.668696 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-client-ca\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.668758 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-proxy-ca-bundles\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.669405 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-config\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.670143 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-serving-cert\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.670643 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-serving-cert\") pod \"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.684672 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cfh5\" (UniqueName: \"kubernetes.io/projected/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-kube-api-access-5cfh5\") pod 
\"controller-manager-6b68577b7b-dn2gh\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") " pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.685385 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glv2m\" (UniqueName: \"kubernetes.io/projected/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-kube-api-access-glv2m\") pod \"route-controller-manager-6b75f56f44-8cxtt\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") " pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.699091 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bmf26"] Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.702806 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bmf26"] Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.707203 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"] Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.709770 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xxkwx"] Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.858628 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:54 crc kubenswrapper[4760]: I1124 17:06:54.868847 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.045820 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt"] Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.098236 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6b68577b7b-dn2gh"] Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.191469 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.191516 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.240443 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.473589 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5dec400-42dd-4869-a1eb-233e55cc120f" path="/var/lib/kubelet/pods/d5dec400-42dd-4869-a1eb-233e55cc120f/volumes" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.474633 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd6a2056-7948-4823-bb36-f9e650d649db" path="/var/lib/kubelet/pods/dd6a2056-7948-4823-bb36-f9e650d649db/volumes" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.636219 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" 
event={"ID":"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3","Type":"ContainerStarted","Data":"44208e93c63ce18b84cf34694df81f6fafa1ef91681429dda07063ea9b74adc3"} Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.636256 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" event={"ID":"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3","Type":"ContainerStarted","Data":"b3b8c9ade02b559f36f9792cafe2bd54e390a135dac4a046fd17491f4d2d548f"} Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.636494 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.637993 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsld7" event={"ID":"cad1e7d4-527f-4dc5-831c-3eaa397c510c","Type":"ContainerStarted","Data":"4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4"} Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.644619 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l96mk" event={"ID":"1d5f4395-aa76-4909-9736-9f67f65b9125","Type":"ContainerStarted","Data":"dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8"} Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.644725 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.647366 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8l4r" event={"ID":"e3baee90-4b85-4f85-a756-67dcc7fb373a","Type":"ContainerStarted","Data":"9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81"} Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.649701 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" event={"ID":"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c","Type":"ContainerStarted","Data":"1164a6667e4bb4e2c3fc6177061e3e75e2130387d5bcd956d79f8983e199e9b6"} Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.649729 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" event={"ID":"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c","Type":"ContainerStarted","Data":"44d68ddd17365046d119b660e7dd549ae97635c70abda0a451d1698b0d56fb6d"} Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.650105 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.654471 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" podStartSLOduration=2.654454169 podStartE2EDuration="2.654454169s" podCreationTimestamp="2025-11-24 17:06:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:06:55.651937537 +0000 UTC m=+210.974819087" watchObservedRunningTime="2025-11-24 17:06:55.654454169 +0000 UTC m=+210.977335719" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.671872 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/community-operators-l96mk" podStartSLOduration=3.2826711140000002 podStartE2EDuration="1m4.671851211s" podCreationTimestamp="2025-11-24 17:05:51 +0000 UTC" firstStartedPulling="2025-11-24 17:05:53.937321697 +0000 UTC m=+149.260203247" lastFinishedPulling="2025-11-24 17:06:55.326501784 +0000 UTC m=+210.649383344" observedRunningTime="2025-11-24 17:06:55.67147853 +0000 UTC m=+210.994360080" watchObservedRunningTime="2025-11-24 17:06:55.671851211 +0000 UTC m=+210.994732761" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.695794 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l8l4r" podStartSLOduration=2.336486247 podStartE2EDuration="1m2.695779721s" podCreationTimestamp="2025-11-24 17:05:53 +0000 UTC" firstStartedPulling="2025-11-24 17:05:55.003785994 +0000 UTC m=+150.326667544" lastFinishedPulling="2025-11-24 17:06:55.363079468 +0000 UTC m=+210.685961018" observedRunningTime="2025-11-24 17:06:55.693303259 +0000 UTC m=+211.016184809" watchObservedRunningTime="2025-11-24 17:06:55.695779721 +0000 UTC m=+211.018661271" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.703352 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.718642 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" podStartSLOduration=2.718626459 podStartE2EDuration="2.718626459s" podCreationTimestamp="2025-11-24 17:06:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:06:55.717364603 +0000 UTC m=+211.040246153" watchObservedRunningTime="2025-11-24 17:06:55.718626459 +0000 UTC m=+211.041507999" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.758041 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bsld7" podStartSLOduration=2.207891429 podStartE2EDuration="1m3.758025185s" podCreationTimestamp="2025-11-24 17:05:52 +0000 UTC" firstStartedPulling="2025-11-24 17:05:53.935783924 +0000 UTC m=+149.258665474" lastFinishedPulling="2025-11-24 17:06:55.48591768 +0000 UTC m=+210.808799230" observedRunningTime="2025-11-24 17:06:55.74881651 +0000 UTC m=+211.071698060" watchObservedRunningTime="2025-11-24 17:06:55.758025185 +0000 UTC m=+211.080906735" Nov 24 17:06:55 crc kubenswrapper[4760]: I1124 17:06:55.846634 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" Nov 24 17:06:56 crc kubenswrapper[4760]: I1124 17:06:56.657466 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psr87" event={"ID":"80110199-7935-43a8-9025-f048cc22defb","Type":"ContainerStarted","Data":"1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2"} Nov 24 17:06:56 crc kubenswrapper[4760]: I1124 17:06:56.683357 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-psr87" podStartSLOduration=2.504155432 podStartE2EDuration="1m2.683340753s" podCreationTimestamp="2025-11-24 17:05:54 +0000 UTC" firstStartedPulling="2025-11-24 17:05:56.022192637 +0000 UTC m=+151.345074187" lastFinishedPulling="2025-11-24 17:06:56.201377958 +0000 UTC 
m=+211.524259508" observedRunningTime="2025-11-24 17:06:56.681185161 +0000 UTC m=+212.004066711" watchObservedRunningTime="2025-11-24 17:06:56.683340753 +0000 UTC m=+212.006222303" Nov 24 17:06:56 crc kubenswrapper[4760]: I1124 17:06:56.853650 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zg8fk"] Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:01.999336 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.000252 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.081246 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.189904 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.189998 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.256446 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.390389 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.390455 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.587388 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.587741 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.657129 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.673272 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.764949 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bsld7" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.769109 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k84dx" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.781908 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:07:02 crc kubenswrapper[4760]: I1124 17:07:02.796613 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:07:03 crc kubenswrapper[4760]: I1124 17:07:03.734287 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/community-operators-bsld7"]
Nov 24 17:07:04 crc kubenswrapper[4760]: I1124 17:07:04.051672 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l8l4r"
Nov 24 17:07:04 crc kubenswrapper[4760]: I1124 17:07:04.051748 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l8l4r"
Nov 24 17:07:04 crc kubenswrapper[4760]: I1124 17:07:04.135692 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l8l4r"
Nov 24 17:07:04 crc kubenswrapper[4760]: I1124 17:07:04.668755 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-psr87"
Nov 24 17:07:04 crc kubenswrapper[4760]: I1124 17:07:04.668825 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-psr87"
Nov 24 17:07:04 crc kubenswrapper[4760]: I1124 17:07:04.710096 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bsld7" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerName="registry-server" containerID="cri-o://4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4" gracePeriod=2
Nov 24 17:07:04 crc kubenswrapper[4760]: I1124 17:07:04.735859 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-psr87"
Nov 24 17:07:04 crc kubenswrapper[4760]: I1124 17:07:04.775260 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l8l4r"
Nov 24 17:07:04 crc kubenswrapper[4760]: I1124 17:07:04.808649 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-psr87"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.125404 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k84dx"]
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.126136 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k84dx" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerName="registry-server" containerID="cri-o://7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727" gracePeriod=2
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.277999 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bsld7"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.323445 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-utilities\") pod \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") "
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.323524 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-catalog-content\") pod \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") "
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.323605 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9s8z\" (UniqueName: \"kubernetes.io/projected/cad1e7d4-527f-4dc5-831c-3eaa397c510c-kube-api-access-b9s8z\") pod \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\" (UID: \"cad1e7d4-527f-4dc5-831c-3eaa397c510c\") "
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.325748 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-utilities" (OuterVolumeSpecName: "utilities") pod "cad1e7d4-527f-4dc5-831c-3eaa397c510c" (UID: "cad1e7d4-527f-4dc5-831c-3eaa397c510c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.330806 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cad1e7d4-527f-4dc5-831c-3eaa397c510c-kube-api-access-b9s8z" (OuterVolumeSpecName: "kube-api-access-b9s8z") pod "cad1e7d4-527f-4dc5-831c-3eaa397c510c" (UID: "cad1e7d4-527f-4dc5-831c-3eaa397c510c"). InnerVolumeSpecName "kube-api-access-b9s8z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.382434 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cad1e7d4-527f-4dc5-831c-3eaa397c510c" (UID: "cad1e7d4-527f-4dc5-831c-3eaa397c510c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.424895 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9s8z\" (UniqueName: \"kubernetes.io/projected/cad1e7d4-527f-4dc5-831c-3eaa397c510c-kube-api-access-b9s8z\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.424935 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.424950 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cad1e7d4-527f-4dc5-831c-3eaa397c510c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.624164 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k84dx"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.642323 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.642388 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.642444 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.643319 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.643397 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696" gracePeriod=600
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.720111 4760 generic.go:334] "Generic (PLEG): container finished" podID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerID="7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727" exitCode=0
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.720218 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k84dx"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.720229 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k84dx" event={"ID":"1423e3b0-3691-49ce-b29d-0f838db4ce3e","Type":"ContainerDied","Data":"7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727"}
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.720312 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k84dx" event={"ID":"1423e3b0-3691-49ce-b29d-0f838db4ce3e","Type":"ContainerDied","Data":"c72c8b5ad7552e8bee792820095552668107142565d64aa05fa64e423c8227df"}
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.720343 4760 scope.go:117] "RemoveContainer" containerID="7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.723160 4760 generic.go:334] "Generic (PLEG): container finished" podID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerID="4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4" exitCode=0
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.723215 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bsld7"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.723263 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsld7" event={"ID":"cad1e7d4-527f-4dc5-831c-3eaa397c510c","Type":"ContainerDied","Data":"4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4"}
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.723328 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsld7" event={"ID":"cad1e7d4-527f-4dc5-831c-3eaa397c510c","Type":"ContainerDied","Data":"7258de4d638efa027aaf1cb3a7eff3726d12a24d9c71f4cc8399eca054240734"}
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.729209 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-utilities\") pod \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") "
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.729295 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-catalog-content\") pod \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") "
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.729437 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8pmp\" (UniqueName: \"kubernetes.io/projected/1423e3b0-3691-49ce-b29d-0f838db4ce3e-kube-api-access-x8pmp\") pod \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\" (UID: \"1423e3b0-3691-49ce-b29d-0f838db4ce3e\") "
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.730189 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-utilities" (OuterVolumeSpecName: "utilities") pod "1423e3b0-3691-49ce-b29d-0f838db4ce3e" (UID: "1423e3b0-3691-49ce-b29d-0f838db4ce3e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.735346 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1423e3b0-3691-49ce-b29d-0f838db4ce3e-kube-api-access-x8pmp" (OuterVolumeSpecName: "kube-api-access-x8pmp") pod "1423e3b0-3691-49ce-b29d-0f838db4ce3e" (UID: "1423e3b0-3691-49ce-b29d-0f838db4ce3e"). InnerVolumeSpecName "kube-api-access-x8pmp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.750998 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bsld7"]
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.751691 4760 scope.go:117] "RemoveContainer" containerID="bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.759176 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bsld7"]
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.777685 4760 scope.go:117] "RemoveContainer" containerID="e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.794508 4760 scope.go:117] "RemoveContainer" containerID="7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727"
Nov 24 17:07:05 crc kubenswrapper[4760]: E1124 17:07:05.795081 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727\": container with ID starting with 7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727 not found: ID does not exist" containerID="7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.795121 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727"} err="failed to get container status \"7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727\": rpc error: code = NotFound desc = could not find container \"7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727\": container with ID starting with 7f197d3f92e81f48bd65c9fd09bc2063412292bc839585b9c23370c37205b727 not found: ID does not exist"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.795148 4760 scope.go:117] "RemoveContainer" containerID="bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc"
Nov 24 17:07:05 crc kubenswrapper[4760]: E1124 17:07:05.795441 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc\": container with ID starting with bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc not found: ID does not exist" containerID="bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.795467 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc"} err="failed to get container status \"bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc\": rpc error: code = NotFound desc = could not find container \"bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc\": container with ID starting with bc1b7ac97958a4cb164ed3b4a101cf0aa9d83dc2810de2dc2f98f509129ef4bc not found: ID does not exist"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.795483 4760 scope.go:117] "RemoveContainer" containerID="e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d"
Nov 24 17:07:05 crc kubenswrapper[4760]: E1124 17:07:05.796024 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d\": container with ID starting with e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d not found: ID does not exist" containerID="e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.796051 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d"} err="failed to get container status \"e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d\": rpc error: code = NotFound desc = could not find container \"e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d\": container with ID starting with e2c773cee0570826ba7d983b45ebd9e60cbad579f9283743dc108d24cd136c0d not found: ID does not exist"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.796068 4760 scope.go:117] "RemoveContainer" containerID="4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.805373 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1423e3b0-3691-49ce-b29d-0f838db4ce3e" (UID: "1423e3b0-3691-49ce-b29d-0f838db4ce3e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.811457 4760 scope.go:117] "RemoveContainer" containerID="2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.831159 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8pmp\" (UniqueName: \"kubernetes.io/projected/1423e3b0-3691-49ce-b29d-0f838db4ce3e-kube-api-access-x8pmp\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.831191 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.831203 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1423e3b0-3691-49ce-b29d-0f838db4ce3e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.840324 4760 scope.go:117] "RemoveContainer" containerID="9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.857228 4760 scope.go:117] "RemoveContainer" containerID="4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4"
Nov 24 17:07:05 crc kubenswrapper[4760]: E1124 17:07:05.857744 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4\": container with ID starting with 4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4 not found: ID does not exist" containerID="4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.857791 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4"} err="failed to get container status \"4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4\": rpc error: code = NotFound desc = could not find container \"4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4\": container with ID starting with 4125ad46fdc373be069671760f8a9fe2622e6c846e3e07455a79d2dfa4934bd4 not found: ID does not exist"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.857903 4760 scope.go:117] "RemoveContainer" containerID="2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f"
Nov 24 17:07:05 crc kubenswrapper[4760]: E1124 17:07:05.858421 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f\": container with ID starting with 2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f not found: ID does not exist" containerID="2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.858452 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f"} err="failed to get container status \"2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f\": rpc error: code = NotFound desc = could not find container \"2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f\": container with ID starting with 2e2cb26ab12dab0b2ac0c01fe883ac56858b826d9a931c777461944213e5f83f not found: ID does not exist"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.858471 4760 scope.go:117] "RemoveContainer" containerID="9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8"
Nov 24 17:07:05 crc kubenswrapper[4760]: E1124 17:07:05.858759 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8\": container with ID starting with 9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8 not found: ID does not exist" containerID="9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8"
Nov 24 17:07:05 crc kubenswrapper[4760]: I1124 17:07:05.858787 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8"} err="failed to get container status \"9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8\": rpc error: code = NotFound desc = could not find container \"9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8\": container with ID starting with 9b4efcd55ab4b44bcd4bd80bc48b297ee55756dc6d494d31301a487b0355cbb8 not found: ID does not exist"
Nov 24 17:07:06 crc kubenswrapper[4760]: I1124 17:07:06.077792 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k84dx"]
Nov 24 17:07:06 crc kubenswrapper[4760]: I1124 17:07:06.082920 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k84dx"]
Nov 24 17:07:06 crc kubenswrapper[4760]: I1124 17:07:06.735195 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696" exitCode=0
Nov 24 17:07:06 crc kubenswrapper[4760]: I1124 17:07:06.735301 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696"}
Nov 24 17:07:06 crc kubenswrapper[4760]: I1124 17:07:06.735616 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"c890ab30decc89bd18031b40b32e3fbedd7cc15c8392d95c3f21ddab1b02a8fb"}
Nov 24 17:07:07 crc kubenswrapper[4760]: I1124 17:07:07.474401 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" path="/var/lib/kubelet/pods/1423e3b0-3691-49ce-b29d-0f838db4ce3e/volumes"
Nov 24 17:07:07 crc kubenswrapper[4760]: I1124 17:07:07.475188 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" path="/var/lib/kubelet/pods/cad1e7d4-527f-4dc5-831c-3eaa397c510c/volumes"
Nov 24 17:07:07 crc kubenswrapper[4760]: I1124 17:07:07.525864 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-psr87"]
Nov 24 17:07:07 crc kubenswrapper[4760]: I1124 17:07:07.526144 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-psr87" podUID="80110199-7935-43a8-9025-f048cc22defb" containerName="registry-server" containerID="cri-o://1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2" gracePeriod=2
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.580987 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-psr87"
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.666983 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-catalog-content\") pod \"80110199-7935-43a8-9025-f048cc22defb\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") "
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.667098 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-utilities\") pod \"80110199-7935-43a8-9025-f048cc22defb\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") "
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.667120 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8vsh\" (UniqueName: \"kubernetes.io/projected/80110199-7935-43a8-9025-f048cc22defb-kube-api-access-g8vsh\") pod \"80110199-7935-43a8-9025-f048cc22defb\" (UID: \"80110199-7935-43a8-9025-f048cc22defb\") "
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.667919 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-utilities" (OuterVolumeSpecName: "utilities") pod "80110199-7935-43a8-9025-f048cc22defb" (UID: "80110199-7935-43a8-9025-f048cc22defb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.675071 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80110199-7935-43a8-9025-f048cc22defb-kube-api-access-g8vsh" (OuterVolumeSpecName: "kube-api-access-g8vsh") pod "80110199-7935-43a8-9025-f048cc22defb" (UID: "80110199-7935-43a8-9025-f048cc22defb"). InnerVolumeSpecName "kube-api-access-g8vsh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.684609 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "80110199-7935-43a8-9025-f048cc22defb" (UID: "80110199-7935-43a8-9025-f048cc22defb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.748637 4760 generic.go:334] "Generic (PLEG): container finished" podID="80110199-7935-43a8-9025-f048cc22defb" containerID="1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2" exitCode=0
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.748676 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psr87" event={"ID":"80110199-7935-43a8-9025-f048cc22defb","Type":"ContainerDied","Data":"1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2"}
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.748703 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psr87" event={"ID":"80110199-7935-43a8-9025-f048cc22defb","Type":"ContainerDied","Data":"7b5e3a3f8ee03469a71e0b401c9b32aec475d820953082625d25a06f5eb4b4da"}
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.748719 4760 scope.go:117] "RemoveContainer" containerID="1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2"
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.748750 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-psr87"
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.764698 4760 scope.go:117] "RemoveContainer" containerID="394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44"
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.768157 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.768177 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8vsh\" (UniqueName: \"kubernetes.io/projected/80110199-7935-43a8-9025-f048cc22defb-kube-api-access-g8vsh\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.768187 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80110199-7935-43a8-9025-f048cc22defb-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.777832 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-psr87"]
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.780149 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-psr87"]
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.792047 4760 scope.go:117] "RemoveContainer" containerID="d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e"
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.805054 4760 scope.go:117] "RemoveContainer" containerID="1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2"
Nov 24 17:07:08 crc kubenswrapper[4760]: E1124 17:07:08.805523 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2\": container with ID starting with 1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2 not found: ID does not exist" containerID="1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2"
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.805569 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2"} err="failed to get container status \"1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2\": rpc error: code = NotFound desc = could not find container \"1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2\": container with ID starting with 1a47ba779bec79edfc7bb7430afe586ef8c25392e418e44399165ac714d8beb2 not found: ID does not exist"
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.805596 4760 scope.go:117] "RemoveContainer" containerID="394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44"
Nov 24 17:07:08 crc kubenswrapper[4760]: E1124 17:07:08.805858 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44\": container with ID starting with 394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44 not found: ID does not exist" containerID="394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44"
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.805889 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44"} err="failed to get container status \"394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44\": rpc error: code = NotFound desc = could not find container \"394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44\": container with ID starting with 394b8fbc8a6f3f8068853ec64b0f2d50bdad0c78e8045aca709951f679e02e44 not found: ID does not exist"
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.805916 4760 scope.go:117] "RemoveContainer" containerID="d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e"
Nov 24 17:07:08 crc kubenswrapper[4760]: E1124 17:07:08.806151 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e\": container with ID starting with d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e not found: ID does not exist" containerID="d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e"
Nov 24 17:07:08 crc kubenswrapper[4760]: I1124 17:07:08.806176 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e"} err="failed to get container status \"d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e\": rpc error: code = NotFound desc = could not find container \"d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e\": container with ID starting with d1c7c4518b853412745e3da745caa801797c0be8810f8cdfdf8b857e3a32919e not found: ID does not exist"
Nov 24 17:07:09 crc kubenswrapper[4760]: I1124 17:07:09.479244 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80110199-7935-43a8-9025-f048cc22defb" path="/var/lib/kubelet/pods/80110199-7935-43a8-9025-f048cc22defb/volumes"
Nov 24 17:07:13 crc kubenswrapper[4760]: I1124 17:07:13.142187 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6b68577b7b-dn2gh"]
Nov 24 17:07:13 crc kubenswrapper[4760]: I1124 17:07:13.142963 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" podUID="ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" containerName="controller-manager" containerID="cri-o://44208e93c63ce18b84cf34694df81f6fafa1ef91681429dda07063ea9b74adc3" gracePeriod=30
Nov 24 17:07:13 crc kubenswrapper[4760]: I1124 17:07:13.158824 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt"]
Nov 24 17:07:13 crc kubenswrapper[4760]: I1124 17:07:13.159112 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" podUID="7589d1e8-0bb3-49f1-b3be-5b6eca7e202c" containerName="route-controller-manager" containerID="cri-o://1164a6667e4bb4e2c3fc6177061e3e75e2130387d5bcd956d79f8983e199e9b6" gracePeriod=30
Nov 24 17:07:13 crc kubenswrapper[4760]: I1124 17:07:13.788090 4760 generic.go:334] "Generic (PLEG): container finished" podID="7589d1e8-0bb3-49f1-b3be-5b6eca7e202c" containerID="1164a6667e4bb4e2c3fc6177061e3e75e2130387d5bcd956d79f8983e199e9b6" exitCode=0
Nov 24 17:07:13 crc kubenswrapper[4760]: I1124 17:07:13.788201 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" event={"ID":"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c","Type":"ContainerDied","Data":"1164a6667e4bb4e2c3fc6177061e3e75e2130387d5bcd956d79f8983e199e9b6"}
Nov 24 17:07:13 crc kubenswrapper[4760]: I1124 17:07:13.790467 4760 generic.go:334] "Generic (PLEG): container finished" podID="ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" containerID="44208e93c63ce18b84cf34694df81f6fafa1ef91681429dda07063ea9b74adc3" exitCode=0
Nov 24 17:07:13 crc kubenswrapper[4760]: I1124 17:07:13.790539 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" event={"ID":"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3","Type":"ContainerDied","Data":"44208e93c63ce18b84cf34694df81f6fafa1ef91681429dda07063ea9b74adc3"}
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.539525 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.552886 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-client-ca\") pod \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") "
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.552943 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-config\") pod \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") "
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.553017 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-serving-cert\") pod \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") "
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.553044 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glv2m\" (UniqueName: \"kubernetes.io/projected/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-kube-api-access-glv2m\") pod \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\" (UID: \"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c\") "
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.553961 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-client-ca" (OuterVolumeSpecName: "client-ca") pod "7589d1e8-0bb3-49f1-b3be-5b6eca7e202c" (UID: "7589d1e8-0bb3-49f1-b3be-5b6eca7e202c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.554534 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-config" (OuterVolumeSpecName: "config") pod "7589d1e8-0bb3-49f1-b3be-5b6eca7e202c" (UID: "7589d1e8-0bb3-49f1-b3be-5b6eca7e202c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.567881 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-kube-api-access-glv2m" (OuterVolumeSpecName: "kube-api-access-glv2m") pod "7589d1e8-0bb3-49f1-b3be-5b6eca7e202c" (UID: "7589d1e8-0bb3-49f1-b3be-5b6eca7e202c"). InnerVolumeSpecName "kube-api-access-glv2m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.577713 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7589d1e8-0bb3-49f1-b3be-5b6eca7e202c" (UID: "7589d1e8-0bb3-49f1-b3be-5b6eca7e202c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.596796 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"]
Nov 24 17:07:14 crc kubenswrapper[4760]: E1124 17:07:14.597186 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80110199-7935-43a8-9025-f048cc22defb" containerName="registry-server"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597207 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="80110199-7935-43a8-9025-f048cc22defb" containerName="registry-server"
Nov 24 17:07:14 crc kubenswrapper[4760]: E1124 17:07:14.597231 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7589d1e8-0bb3-49f1-b3be-5b6eca7e202c" containerName="route-controller-manager"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597243 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="7589d1e8-0bb3-49f1-b3be-5b6eca7e202c" containerName="route-controller-manager"
Nov 24 17:07:14 crc kubenswrapper[4760]: E1124 17:07:14.597262 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80110199-7935-43a8-9025-f048cc22defb" containerName="extract-utilities"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597273 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="80110199-7935-43a8-9025-f048cc22defb" containerName="extract-utilities"
Nov 24 17:07:14 crc kubenswrapper[4760]: E1124 17:07:14.597287 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80110199-7935-43a8-9025-f048cc22defb" containerName="extract-content"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597297 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="80110199-7935-43a8-9025-f048cc22defb" containerName="extract-content"
Nov 24 17:07:14 crc kubenswrapper[4760]: E1124 17:07:14.597315 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerName="extract-content"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597328 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerName="extract-content"
Nov 24 17:07:14 crc kubenswrapper[4760]: E1124 17:07:14.597347 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerName="registry-server"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597367 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerName="registry-server"
Nov 24 17:07:14 crc kubenswrapper[4760]: E1124 17:07:14.597388 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerName="extract-utilities"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597399 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerName="extract-utilities"
Nov 24 17:07:14 crc kubenswrapper[4760]: E1124 17:07:14.597413 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerName="extract-content"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597422 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerName="extract-content"
Nov 24 17:07:14 crc kubenswrapper[4760]: E1124 17:07:14.597449 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerName="registry-server"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597460 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerName="registry-server"
Nov 24 17:07:14 crc kubenswrapper[4760]: E1124 17:07:14.597476 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerName="extract-utilities"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597486 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerName="extract-utilities"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597665 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="cad1e7d4-527f-4dc5-831c-3eaa397c510c" containerName="registry-server"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597679 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="80110199-7935-43a8-9025-f048cc22defb" containerName="registry-server"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597693 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="7589d1e8-0bb3-49f1-b3be-5b6eca7e202c" containerName="route-controller-manager"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.597717 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1423e3b0-3691-49ce-b29d-0f838db4ce3e" containerName="registry-server"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.598413 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.602529 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"]
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.624817 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.654091 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-serving-cert\") pod \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") "
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.654155 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cfh5\" (UniqueName: \"kubernetes.io/projected/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-kube-api-access-5cfh5\") pod \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") "
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.654294 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-proxy-ca-bundles\") pod \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") "
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.655188 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-client-ca" (OuterVolumeSpecName: "client-ca") pod "ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" (UID: "ccaef938-0260-4514-b8a0-e2ebcf2ab5e3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.655196 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" (UID: "ccaef938-0260-4514-b8a0-e2ebcf2ab5e3"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.655244 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-client-ca\") pod \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") "
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.655320 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-config\") pod \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\" (UID: \"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3\") "
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.655670 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/06c8a4ae-8ecd-4c46-85b5-4040f943f927-client-ca\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.655836 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/06c8a4ae-8ecd-4c46-85b5-4040f943f927-serving-cert\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.655943 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6n8z\" (UniqueName: \"kubernetes.io/projected/06c8a4ae-8ecd-4c46-85b5-4040f943f927-kube-api-access-r6n8z\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.656035 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-config" (OuterVolumeSpecName: "config") pod "ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" (UID: "ccaef938-0260-4514-b8a0-e2ebcf2ab5e3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.656081 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06c8a4ae-8ecd-4c46-85b5-4040f943f927-config\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.656218 4760 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-client-ca\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.656238 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-config\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.656252 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.656264 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glv2m\" (UniqueName: \"kubernetes.io/projected/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-kube-api-access-glv2m\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.656278 4760 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-client-ca\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.656290 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c-config\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.656301 4760 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.657670 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-kube-api-access-5cfh5" (OuterVolumeSpecName: "kube-api-access-5cfh5") pod "ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" (UID: "ccaef938-0260-4514-b8a0-e2ebcf2ab5e3"). InnerVolumeSpecName "kube-api-access-5cfh5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.658494 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" (UID: "ccaef938-0260-4514-b8a0-e2ebcf2ab5e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.758080 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/06c8a4ae-8ecd-4c46-85b5-4040f943f927-serving-cert\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.758180 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6n8z\" (UniqueName: \"kubernetes.io/projected/06c8a4ae-8ecd-4c46-85b5-4040f943f927-kube-api-access-r6n8z\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.758235 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06c8a4ae-8ecd-4c46-85b5-4040f943f927-config\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.758288 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/06c8a4ae-8ecd-4c46-85b5-4040f943f927-client-ca\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.758385 4760 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.758407 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cfh5\" (UniqueName: \"kubernetes.io/projected/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3-kube-api-access-5cfh5\") on node \"crc\" DevicePath \"\""
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.759724 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/06c8a4ae-8ecd-4c46-85b5-4040f943f927-client-ca\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.765401 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06c8a4ae-8ecd-4c46-85b5-4040f943f927-config\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.767332 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/06c8a4ae-8ecd-4c46-85b5-4040f943f927-serving-cert\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.785868 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6n8z\" (UniqueName: \"kubernetes.io/projected/06c8a4ae-8ecd-4c46-85b5-4040f943f927-kube-api-access-r6n8z\") pod \"route-controller-manager-9ffbb994d-w8g62\" (UID: \"06c8a4ae-8ecd-4c46-85b5-4040f943f927\") " pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.798401 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt" event={"ID":"7589d1e8-0bb3-49f1-b3be-5b6eca7e202c","Type":"ContainerDied","Data":"44d68ddd17365046d119b660e7dd549ae97635c70abda0a451d1698b0d56fb6d"}
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.798428 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.798483 4760 scope.go:117] "RemoveContainer" containerID="1164a6667e4bb4e2c3fc6177061e3e75e2130387d5bcd956d79f8983e199e9b6"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.800436 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh" event={"ID":"ccaef938-0260-4514-b8a0-e2ebcf2ab5e3","Type":"ContainerDied","Data":"b3b8c9ade02b559f36f9792cafe2bd54e390a135dac4a046fd17491f4d2d548f"}
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.800489 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6b68577b7b-dn2gh"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.830098 4760 scope.go:117] "RemoveContainer" containerID="44208e93c63ce18b84cf34694df81f6fafa1ef91681429dda07063ea9b74adc3"
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.853391 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt"]
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.863935 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b75f56f44-8cxtt"]
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.869121 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6b68577b7b-dn2gh"]
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.874266 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6b68577b7b-dn2gh"]
Nov 24 17:07:14 crc kubenswrapper[4760]: I1124 17:07:14.934268 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:15 crc kubenswrapper[4760]: I1124 17:07:15.198258 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"]
Nov 24 17:07:15 crc kubenswrapper[4760]: W1124 17:07:15.207199 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06c8a4ae_8ecd_4c46_85b5_4040f943f927.slice/crio-022b3716cb256f845a80e23cdb3e8093ea20d0d0972abe78e8101a14d3053a5e WatchSource:0}: Error finding container 022b3716cb256f845a80e23cdb3e8093ea20d0d0972abe78e8101a14d3053a5e: Status 404 returned error can't find the container with id 022b3716cb256f845a80e23cdb3e8093ea20d0d0972abe78e8101a14d3053a5e
Nov 24 17:07:15 crc kubenswrapper[4760]: I1124 17:07:15.472899 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7589d1e8-0bb3-49f1-b3be-5b6eca7e202c" path="/var/lib/kubelet/pods/7589d1e8-0bb3-49f1-b3be-5b6eca7e202c/volumes"
Nov 24 17:07:15 crc kubenswrapper[4760]: I1124 17:07:15.474119 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" path="/var/lib/kubelet/pods/ccaef938-0260-4514-b8a0-e2ebcf2ab5e3/volumes"
Nov 24 17:07:15 crc kubenswrapper[4760]: I1124 17:07:15.807817 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62" event={"ID":"06c8a4ae-8ecd-4c46-85b5-4040f943f927","Type":"ContainerStarted","Data":"6e9f37335bf6530842bf0603785e64f8869dabf2bbbae3a27e3844f77d439772"}
Nov 24 17:07:15 crc kubenswrapper[4760]: I1124 17:07:15.807867 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62" event={"ID":"06c8a4ae-8ecd-4c46-85b5-4040f943f927","Type":"ContainerStarted","Data":"022b3716cb256f845a80e23cdb3e8093ea20d0d0972abe78e8101a14d3053a5e"}
Nov 24 17:07:15 crc kubenswrapper[4760]: I1124 17:07:15.808447 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:15 crc kubenswrapper[4760]: I1124 17:07:15.831122 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62" podStartSLOduration=2.831105595 podStartE2EDuration="2.831105595s" podCreationTimestamp="2025-11-24 17:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:07:15.82815042 +0000 UTC m=+231.151031970" watchObservedRunningTime="2025-11-24 17:07:15.831105595 +0000 UTC m=+231.153987145"
Nov 24 17:07:15 crc kubenswrapper[4760]: I1124 17:07:15.872959 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-9ffbb994d-w8g62"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.549935 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"]
Nov 24 17:07:17 crc kubenswrapper[4760]: E1124 17:07:17.550295 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" containerName="controller-manager"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.550316 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" containerName="controller-manager"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.550484 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccaef938-0260-4514-b8a0-e2ebcf2ab5e3" containerName="controller-manager"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.551076 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.554877 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.555553 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.555716 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.555828 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.556208 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.558784 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.568120 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"]
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.572892 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.699213 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-serving-cert\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.699333 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-config\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.699442 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-proxy-ca-bundles\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.699514 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbp9l\" (UniqueName: \"kubernetes.io/projected/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-kube-api-access-vbp9l\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.699597 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-client-ca\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.801046 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-client-ca\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.801828 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-serving-cert\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.801879 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-config\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.801949 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-proxy-ca-bundles\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.801992 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbp9l\" (UniqueName: \"kubernetes.io/projected/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-kube-api-access-vbp9l\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.803345 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-proxy-ca-bundles\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.803503 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-config\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.803777 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-client-ca\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.823345 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbp9l\" (UniqueName: \"kubernetes.io/projected/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-kube-api-access-vbp9l\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.823848 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/faf0c3e2-8d7b-4a73-b513-3e038f4aa623-serving-cert\") pod \"controller-manager-67dc9c87b4-z2rnz\" (UID: \"faf0c3e2-8d7b-4a73-b513-3e038f4aa623\") " pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:17 crc kubenswrapper[4760]: I1124 17:07:17.914748 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:18 crc kubenswrapper[4760]: I1124 17:07:18.313945 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"]
Nov 24 17:07:18 crc kubenswrapper[4760]: I1124 17:07:18.826798 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz" event={"ID":"faf0c3e2-8d7b-4a73-b513-3e038f4aa623","Type":"ContainerStarted","Data":"c65e1029f3928c7633b7102caeffb573803d3a5c40584a9072c3206b57b7eaad"}
Nov 24 17:07:18 crc kubenswrapper[4760]: I1124 17:07:18.826839 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz" event={"ID":"faf0c3e2-8d7b-4a73-b513-3e038f4aa623","Type":"ContainerStarted","Data":"86842ce455ca7bf9f540bfaefe2a6fab58451300bfb6924895c7354bdf9001ae"}
Nov 24 17:07:18 crc kubenswrapper[4760]: I1124 17:07:18.827104 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:18 crc kubenswrapper[4760]: I1124 17:07:18.832653 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz"
Nov 24 17:07:18 crc kubenswrapper[4760]: I1124 17:07:18.844688 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-67dc9c87b4-z2rnz" podStartSLOduration=5.844670882 podStartE2EDuration="5.844670882s" podCreationTimestamp="2025-11-24 17:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:07:18.843218971 +0000 UTC m=+234.166100531" watchObservedRunningTime="2025-11-24 17:07:18.844670882 +0000 UTC m=+234.167552432"
Nov 24 17:07:21 crc kubenswrapper[4760]: I1124 17:07:21.876344 4760 kuberuntime_container.go:808] "Killing container with a grace period"
pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" podUID="44091a4f-586a-44f5-934d-294bbe4458c0" containerName="oauth-openshift" containerID="cri-o://c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c" gracePeriod=15 Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.369117 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.565557 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-login\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.565652 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-service-ca\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.565910 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-router-certs\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.565979 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-error\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566034 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-ocp-branding-template\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566079 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-idp-0-file-data\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566110 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/44091a4f-586a-44f5-934d-294bbe4458c0-audit-dir\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566151 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-provider-selection\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc 
kubenswrapper[4760]: I1124 17:07:22.566187 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-trusted-ca-bundle\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566209 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-cliconfig\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566236 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-425ww\" (UniqueName: \"kubernetes.io/projected/44091a4f-586a-44f5-934d-294bbe4458c0-kube-api-access-425ww\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566220 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/44091a4f-586a-44f5-934d-294bbe4458c0-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566302 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-audit-policies\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566331 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-session\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566351 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-serving-cert\") pod \"44091a4f-586a-44f5-934d-294bbe4458c0\" (UID: \"44091a4f-586a-44f5-934d-294bbe4458c0\") " Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.566715 4760 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/44091a4f-586a-44f5-934d-294bbe4458c0-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.567641 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.567685 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.567853 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.568279 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.572762 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.573224 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.573363 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44091a4f-586a-44f5-934d-294bbe4458c0-kube-api-access-425ww" (OuterVolumeSpecName: "kube-api-access-425ww") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "kube-api-access-425ww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.575325 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.575665 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.575864 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.576166 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.576298 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.579385 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "44091a4f-586a-44f5-934d-294bbe4458c0" (UID: "44091a4f-586a-44f5-934d-294bbe4458c0"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.667933 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.667996 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668070 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-425ww\" (UniqueName: \"kubernetes.io/projected/44091a4f-586a-44f5-934d-294bbe4458c0-kube-api-access-425ww\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668094 4760 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668112 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668131 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668149 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668167 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668188 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668207 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668226 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668246 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.668265 4760 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/44091a4f-586a-44f5-934d-294bbe4458c0-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.870539 4760 generic.go:334] "Generic (PLEG): container finished" podID="44091a4f-586a-44f5-934d-294bbe4458c0" containerID="c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c" exitCode=0 Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.870618 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" event={"ID":"44091a4f-586a-44f5-934d-294bbe4458c0","Type":"ContainerDied","Data":"c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c"} Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.870669 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" event={"ID":"44091a4f-586a-44f5-934d-294bbe4458c0","Type":"ContainerDied","Data":"f84363d016c15a3a7d6517522c4dec51050a5c823c1268fd48c853dc2d8f993f"} Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.870708 4760 scope.go:117] "RemoveContainer" containerID="c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.870902 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-zg8fk" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.901436 4760 scope.go:117] "RemoveContainer" containerID="c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c" Nov 24 17:07:22 crc kubenswrapper[4760]: E1124 17:07:22.903134 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c\": container with ID starting with c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c not found: ID does not exist" containerID="c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.903191 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c"} err="failed to get container status \"c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c\": rpc error: code = NotFound desc = could not find container \"c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c\": container with ID starting with c361cf264126fa2d1358480e6e034dd6bce0a2fa223e69ff1f795489ba1f668c not found: ID does not exist" Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.928076 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zg8fk"] Nov 24 17:07:22 crc kubenswrapper[4760]: I1124 17:07:22.931147 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zg8fk"] Nov 24 17:07:23 crc kubenswrapper[4760]: I1124 17:07:23.477738 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44091a4f-586a-44f5-934d-294bbe4458c0" 
path="/var/lib/kubelet/pods/44091a4f-586a-44f5-934d-294bbe4458c0/volumes" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.558466 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7795679f96-9zmkq"] Nov 24 17:07:29 crc kubenswrapper[4760]: E1124 17:07:29.560071 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44091a4f-586a-44f5-934d-294bbe4458c0" containerName="oauth-openshift" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.560098 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="44091a4f-586a-44f5-934d-294bbe4458c0" containerName="oauth-openshift" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.560368 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="44091a4f-586a-44f5-934d-294bbe4458c0" containerName="oauth-openshift" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.560933 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.563739 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.570549 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.570735 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.570727 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.570812 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.570834 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.570979 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.571046 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.571055 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.571105 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.571214 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.573594 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.577708 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 24 17:07:29 crc kubenswrapper[4760]: 
I1124 17:07:29.588312 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7795679f96-9zmkq"] Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.589244 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.594046 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.662482 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-session\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.662842 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qbhh\" (UniqueName: \"kubernetes.io/projected/2c6cf59b-c655-44f0-975a-edba564bff9c-kube-api-access-2qbhh\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.662911 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.662963 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-template-error\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.663000 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.663063 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.663106 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-service-ca\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.663424 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-template-login\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.663530 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-router-certs\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.663622 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2c6cf59b-c655-44f0-975a-edba564bff9c-audit-dir\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.663666 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-audit-policies\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.663700 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.663745 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.663834 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765525 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-template-login\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765613 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-router-certs\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765660 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2c6cf59b-c655-44f0-975a-edba564bff9c-audit-dir\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765688 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-audit-policies\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765708 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765735 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765756 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765812 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-session\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765876 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-2qbhh\" (UniqueName: \"kubernetes.io/projected/2c6cf59b-c655-44f0-975a-edba564bff9c-kube-api-access-2qbhh\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765898 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765929 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-template-error\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765958 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.765984 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.766049 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-service-ca\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.766341 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2c6cf59b-c655-44f0-975a-edba564bff9c-audit-dir\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.767091 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-service-ca\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.767313 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.768501 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-audit-policies\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.768821 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.774121 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-template-error\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.774869 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-router-certs\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.775348 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-template-login\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.775797 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.776896 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.777399 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.778707 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.779562 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2c6cf59b-c655-44f0-975a-edba564bff9c-v4-0-config-system-session\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.786160 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qbhh\" (UniqueName: \"kubernetes.io/projected/2c6cf59b-c655-44f0-975a-edba564bff9c-kube-api-access-2qbhh\") pod \"oauth-openshift-7795679f96-9zmkq\" (UID: \"2c6cf59b-c655-44f0-975a-edba564bff9c\") " pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:29 crc kubenswrapper[4760]: I1124 17:07:29.898309 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:30 crc kubenswrapper[4760]: I1124 17:07:30.343848 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7795679f96-9zmkq"] Nov 24 17:07:30 crc kubenswrapper[4760]: I1124 17:07:30.930792 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" event={"ID":"2c6cf59b-c655-44f0-975a-edba564bff9c","Type":"ContainerStarted","Data":"4131a8ecfb24a90f8a02b894dbb7759ace8aca89b946a2d4762d692fe974e348"} Nov 24 17:07:30 crc kubenswrapper[4760]: I1124 17:07:30.931237 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" event={"ID":"2c6cf59b-c655-44f0-975a-edba564bff9c","Type":"ContainerStarted","Data":"b3c922111acc0c5a5c8c9dfdcc2b6423643eeab5efe84c5f3196d71839db84b4"} Nov 24 17:07:30 crc kubenswrapper[4760]: I1124 17:07:30.931428 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:07:30 crc kubenswrapper[4760]: I1124 17:07:30.957226 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" podStartSLOduration=34.957205441 podStartE2EDuration="34.957205441s" podCreationTimestamp="2025-11-24 17:06:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:07:30.955258575 +0000 UTC m=+246.278140155" watchObservedRunningTime="2025-11-24 17:07:30.957205441 +0000 UTC m=+246.280087011" Nov 24 17:07:31 crc kubenswrapper[4760]: I1124 17:07:31.394066 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-authentication/oauth-openshift-7795679f96-9zmkq" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.373576 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xrzxz"] Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.376113 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xrzxz" podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerName="registry-server" containerID="cri-o://16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952" gracePeriod=30 Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.380257 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l96mk"] Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.380641 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l96mk" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerName="registry-server" containerID="cri-o://dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8" gracePeriod=30 Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.395142 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wwglm"] Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.395528 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" podUID="82a0fb7c-7c66-41a6-9ebb-5608d47ce382" containerName="marketplace-operator" containerID="cri-o://b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744" gracePeriod=30 Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.409536 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l8l4r"] Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.409864 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l8l4r" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerName="registry-server" containerID="cri-o://9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81" gracePeriod=30 Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.414118 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dffm4"] Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.414402 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dffm4" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerName="registry-server" containerID="cri-o://03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a" gracePeriod=30 Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.439048 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jfn9q"] Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.440059 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.456513 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jfn9q"] Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.527095 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3947261e-1d34-46c9-a769-f71d6e03f7d1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-jfn9q\" (UID: \"3947261e-1d34-46c9-a769-f71d6e03f7d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.527144 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsl4x\" (UniqueName: \"kubernetes.io/projected/3947261e-1d34-46c9-a769-f71d6e03f7d1-kube-api-access-dsl4x\") pod \"marketplace-operator-79b997595-jfn9q\" (UID: \"3947261e-1d34-46c9-a769-f71d6e03f7d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.527217 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3947261e-1d34-46c9-a769-f71d6e03f7d1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-jfn9q\" (UID: \"3947261e-1d34-46c9-a769-f71d6e03f7d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.628112 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3947261e-1d34-46c9-a769-f71d6e03f7d1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-jfn9q\" (UID: \"3947261e-1d34-46c9-a769-f71d6e03f7d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.628469 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3947261e-1d34-46c9-a769-f71d6e03f7d1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-jfn9q\" (UID: \"3947261e-1d34-46c9-a769-f71d6e03f7d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.628614 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsl4x\" (UniqueName: \"kubernetes.io/projected/3947261e-1d34-46c9-a769-f71d6e03f7d1-kube-api-access-dsl4x\") pod \"marketplace-operator-79b997595-jfn9q\" (UID: \"3947261e-1d34-46c9-a769-f71d6e03f7d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.635770 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3947261e-1d34-46c9-a769-f71d6e03f7d1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-jfn9q\" (UID: \"3947261e-1d34-46c9-a769-f71d6e03f7d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.637019 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/3947261e-1d34-46c9-a769-f71d6e03f7d1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-jfn9q\" (UID: \"3947261e-1d34-46c9-a769-f71d6e03f7d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.646418 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsl4x\" (UniqueName: \"kubernetes.io/projected/3947261e-1d34-46c9-a769-f71d6e03f7d1-kube-api-access-dsl4x\") pod \"marketplace-operator-79b997595-jfn9q\" (UID: \"3947261e-1d34-46c9-a769-f71d6e03f7d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.834591 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.841295 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.902113 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.930271 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.935460 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.936949 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-utilities\") pod \"e3baee90-4b85-4f85-a756-67dcc7fb373a\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.937112 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdfxv\" (UniqueName: \"kubernetes.io/projected/e3baee90-4b85-4f85-a756-67dcc7fb373a-kube-api-access-jdfxv\") pod \"e3baee90-4b85-4f85-a756-67dcc7fb373a\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.937765 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kd72g\" (UniqueName: \"kubernetes.io/projected/1d5f4395-aa76-4909-9736-9f67f65b9125-kube-api-access-kd72g\") pod \"1d5f4395-aa76-4909-9736-9f67f65b9125\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.937840 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-utilities\") pod \"443db8f4-7e0f-498f-9602-c93d1086f2cb\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.937919 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-catalog-content\") pod \"1d5f4395-aa76-4909-9736-9f67f65b9125\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.937948 4760 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-catalog-content\") pod \"e3baee90-4b85-4f85-a756-67dcc7fb373a\" (UID: \"e3baee90-4b85-4f85-a756-67dcc7fb373a\") " Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.938026 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmcq6\" (UniqueName: \"kubernetes.io/projected/443db8f4-7e0f-498f-9602-c93d1086f2cb-kube-api-access-fmcq6\") pod \"443db8f4-7e0f-498f-9602-c93d1086f2cb\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.938089 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-catalog-content\") pod \"443db8f4-7e0f-498f-9602-c93d1086f2cb\" (UID: \"443db8f4-7e0f-498f-9602-c93d1086f2cb\") " Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.938120 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-utilities\") pod \"1d5f4395-aa76-4909-9736-9f67f65b9125\" (UID: \"1d5f4395-aa76-4909-9736-9f67f65b9125\") " Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.939072 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-utilities" (OuterVolumeSpecName: "utilities") pod "443db8f4-7e0f-498f-9602-c93d1086f2cb" (UID: "443db8f4-7e0f-498f-9602-c93d1086f2cb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.939790 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-utilities" (OuterVolumeSpecName: "utilities") pod "e3baee90-4b85-4f85-a756-67dcc7fb373a" (UID: "e3baee90-4b85-4f85-a756-67dcc7fb373a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.940994 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-utilities" (OuterVolumeSpecName: "utilities") pod "1d5f4395-aa76-4909-9736-9f67f65b9125" (UID: "1d5f4395-aa76-4909-9736-9f67f65b9125"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.942555 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d5f4395-aa76-4909-9736-9f67f65b9125-kube-api-access-kd72g" (OuterVolumeSpecName: "kube-api-access-kd72g") pod "1d5f4395-aa76-4909-9736-9f67f65b9125" (UID: "1d5f4395-aa76-4909-9736-9f67f65b9125"). InnerVolumeSpecName "kube-api-access-kd72g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.944397 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/443db8f4-7e0f-498f-9602-c93d1086f2cb-kube-api-access-fmcq6" (OuterVolumeSpecName: "kube-api-access-fmcq6") pod "443db8f4-7e0f-498f-9602-c93d1086f2cb" (UID: "443db8f4-7e0f-498f-9602-c93d1086f2cb"). InnerVolumeSpecName "kube-api-access-fmcq6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.944422 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:08:00 crc kubenswrapper[4760]: I1124 17:08:00.945041 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3baee90-4b85-4f85-a756-67dcc7fb373a-kube-api-access-jdfxv" (OuterVolumeSpecName: "kube-api-access-jdfxv") pod "e3baee90-4b85-4f85-a756-67dcc7fb373a" (UID: "e3baee90-4b85-4f85-a756-67dcc7fb373a"). InnerVolumeSpecName "kube-api-access-jdfxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:00.994405 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3baee90-4b85-4f85-a756-67dcc7fb373a" (UID: "e3baee90-4b85-4f85-a756-67dcc7fb373a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.038297 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d5f4395-aa76-4909-9736-9f67f65b9125" (UID: "1d5f4395-aa76-4909-9736-9f67f65b9125"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.039780 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24zsj\" (UniqueName: \"kubernetes.io/projected/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-kube-api-access-24zsj\") pod \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.039839 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-utilities\") pod \"0d6c1b39-d49f-49ab-bfa7-c28657529520\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.039883 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppj2p\" (UniqueName: \"kubernetes.io/projected/0d6c1b39-d49f-49ab-bfa7-c28657529520-kube-api-access-ppj2p\") pod \"0d6c1b39-d49f-49ab-bfa7-c28657529520\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.039916 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-catalog-content\") pod \"0d6c1b39-d49f-49ab-bfa7-c28657529520\" (UID: \"0d6c1b39-d49f-49ab-bfa7-c28657529520\") " Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.039938 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-trusted-ca\") pod \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.040028 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-operator-metrics\") pod \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\" (UID: \"82a0fb7c-7c66-41a6-9ebb-5608d47ce382\") " Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.040197 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmcq6\" (UniqueName: \"kubernetes.io/projected/443db8f4-7e0f-498f-9602-c93d1086f2cb-kube-api-access-fmcq6\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.040226 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.040237 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.040247 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdfxv\" (UniqueName: \"kubernetes.io/projected/e3baee90-4b85-4f85-a756-67dcc7fb373a-kube-api-access-jdfxv\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.040259 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kd72g\" (UniqueName: \"kubernetes.io/projected/1d5f4395-aa76-4909-9736-9f67f65b9125-kube-api-access-kd72g\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.040268 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.040279 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d5f4395-aa76-4909-9736-9f67f65b9125-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.040288 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3baee90-4b85-4f85-a756-67dcc7fb373a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.042493 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "82a0fb7c-7c66-41a6-9ebb-5608d47ce382" (UID: "82a0fb7c-7c66-41a6-9ebb-5608d47ce382"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.042875 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "443db8f4-7e0f-498f-9602-c93d1086f2cb" (UID: "443db8f4-7e0f-498f-9602-c93d1086f2cb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.044681 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "82a0fb7c-7c66-41a6-9ebb-5608d47ce382" (UID: "82a0fb7c-7c66-41a6-9ebb-5608d47ce382"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.044692 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d6c1b39-d49f-49ab-bfa7-c28657529520-kube-api-access-ppj2p" (OuterVolumeSpecName: "kube-api-access-ppj2p") pod "0d6c1b39-d49f-49ab-bfa7-c28657529520" (UID: "0d6c1b39-d49f-49ab-bfa7-c28657529520"). InnerVolumeSpecName "kube-api-access-ppj2p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.046101 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-kube-api-access-24zsj" (OuterVolumeSpecName: "kube-api-access-24zsj") pod "82a0fb7c-7c66-41a6-9ebb-5608d47ce382" (UID: "82a0fb7c-7c66-41a6-9ebb-5608d47ce382"). InnerVolumeSpecName "kube-api-access-24zsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.051098 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-utilities" (OuterVolumeSpecName: "utilities") pod "0d6c1b39-d49f-49ab-bfa7-c28657529520" (UID: "0d6c1b39-d49f-49ab-bfa7-c28657529520"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.110892 4760 generic.go:334] "Generic (PLEG): container finished" podID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerID="9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81" exitCode=0 Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.110970 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8l4r" event={"ID":"e3baee90-4b85-4f85-a756-67dcc7fb373a","Type":"ContainerDied","Data":"9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81"} Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.111000 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8l4r" event={"ID":"e3baee90-4b85-4f85-a756-67dcc7fb373a","Type":"ContainerDied","Data":"50fcd972edddb593d767e837a982daa18dc76f6104acb84d63d1438aeb844fb1"} Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.111025 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l8l4r" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.111033 4760 scope.go:117] "RemoveContainer" containerID="9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.114870 4760 generic.go:334] "Generic (PLEG): container finished" podID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerID="03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a" exitCode=0 Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.114906 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dffm4" event={"ID":"0d6c1b39-d49f-49ab-bfa7-c28657529520","Type":"ContainerDied","Data":"03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a"} Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.114923 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dffm4" event={"ID":"0d6c1b39-d49f-49ab-bfa7-c28657529520","Type":"ContainerDied","Data":"7dd85ecf0101d68646d7c97efbf164d9d527fd3d460ffae290dc3fc383f2c812"} Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.114974 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dffm4" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.116781 4760 generic.go:334] "Generic (PLEG): container finished" podID="82a0fb7c-7c66-41a6-9ebb-5608d47ce382" containerID="b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744" exitCode=0 Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.116822 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" event={"ID":"82a0fb7c-7c66-41a6-9ebb-5608d47ce382","Type":"ContainerDied","Data":"b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744"} Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.116838 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" event={"ID":"82a0fb7c-7c66-41a6-9ebb-5608d47ce382","Type":"ContainerDied","Data":"03887378f56467101cada4a624b999db410ce3a0a4eec25184f0b8000e760e7f"} Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.116874 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wwglm" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.127811 4760 generic.go:334] "Generic (PLEG): container finished" podID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerID="dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8" exitCode=0 Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.127926 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l96mk" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.128326 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l96mk" event={"ID":"1d5f4395-aa76-4909-9736-9f67f65b9125","Type":"ContainerDied","Data":"dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8"} Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.128366 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l96mk" event={"ID":"1d5f4395-aa76-4909-9736-9f67f65b9125","Type":"ContainerDied","Data":"87d3a601f5b4ac2e5fd5429b72e3b948b326eb9c4cc3766f80510e6602467beb"} Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.135393 4760 scope.go:117] "RemoveContainer" containerID="b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.138232 4760 generic.go:334] "Generic (PLEG): container finished" podID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerID="16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952" exitCode=0 Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.138307 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xrzxz" event={"ID":"443db8f4-7e0f-498f-9602-c93d1086f2cb","Type":"ContainerDied","Data":"16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952"} Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.138357 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xrzxz" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.138857 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xrzxz" event={"ID":"443db8f4-7e0f-498f-9602-c93d1086f2cb","Type":"ContainerDied","Data":"c0adebb787cb5f3fb4a0317257a7aa47f59c536af5f1dac0ef15258880cfa304"} Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.140867 4760 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.140892 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24zsj\" (UniqueName: \"kubernetes.io/projected/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-kube-api-access-24zsj\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.140902 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.140912 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/443db8f4-7e0f-498f-9602-c93d1086f2cb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.140920 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppj2p\" (UniqueName: \"kubernetes.io/projected/0d6c1b39-d49f-49ab-bfa7-c28657529520-kube-api-access-ppj2p\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.140929 4760 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/82a0fb7c-7c66-41a6-9ebb-5608d47ce382-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.147915 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wwglm"] Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.150117 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0d6c1b39-d49f-49ab-bfa7-c28657529520" (UID: "0d6c1b39-d49f-49ab-bfa7-c28657529520"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.152317 4760 scope.go:117] "RemoveContainer" containerID="6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.154072 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wwglm"] Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.170959 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l96mk"] Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.178115 4760 scope.go:117] "RemoveContainer" containerID="9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.178303 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l96mk"] Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.178879 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81\": container with ID starting with 9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81 not found: ID does not exist" containerID="9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.179035 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81"} err="failed to get container status \"9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81\": rpc error: code = NotFound desc = could not find container \"9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81\": container with ID starting with 9b7eddeeb299e81600fe1dcec9688be7b38e558c9233b87a79d2012d07204f81 not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.179157 4760 scope.go:117] "RemoveContainer" containerID="b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.182221 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc\": container with ID starting with b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc not found: ID does not exist" containerID="b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.182347 4760 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc"} err="failed to get container status \"b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc\": rpc error: code = NotFound desc = could not find container \"b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc\": container with ID starting with b44ddcf9bdf9a4e910b188107fe1c81c1249d0f73d9d7f59f8a5fa9817412adc not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.182464 4760 scope.go:117] "RemoveContainer" containerID="6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.183389 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49\": container with ID starting with 6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49 not found: ID does not exist" containerID="6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.183491 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49"} err="failed to get container status \"6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49\": rpc error: code = NotFound desc = could not find container \"6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49\": container with ID starting with 6b72864b2653b253dea204fa0d3fc38d60a00489048a048ec11a2f4e282d7b49 not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.183584 4760 scope.go:117] "RemoveContainer" containerID="03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.187323 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l8l4r"] Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.193135 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l8l4r"] Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.195636 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xrzxz"] Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.198339 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xrzxz"] Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.204359 4760 scope.go:117] "RemoveContainer" containerID="088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.221855 4760 scope.go:117] "RemoveContainer" containerID="a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.235181 4760 scope.go:117] "RemoveContainer" containerID="03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.235668 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a\": container with ID starting with 03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a not found: ID does not exist" 
containerID="03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.235776 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a"} err="failed to get container status \"03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a\": rpc error: code = NotFound desc = could not find container \"03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a\": container with ID starting with 03e252efe6632204ded1dd2911238f2dc77c9657a44ef206ad3bf7f3d4cfdb9a not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.235862 4760 scope.go:117] "RemoveContainer" containerID="088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.236286 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc\": container with ID starting with 088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc not found: ID does not exist" containerID="088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.236326 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc"} err="failed to get container status \"088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc\": rpc error: code = NotFound desc = could not find container \"088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc\": container with ID starting with 088a404aa7236d0e6916e191d53b717a790b20a270e03490fb4f43530af0decc not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.236357 4760 scope.go:117] "RemoveContainer" containerID="a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.236624 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b\": container with ID starting with a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b not found: ID does not exist" containerID="a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.236704 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b"} err="failed to get container status \"a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b\": rpc error: code = NotFound desc = could not find container \"a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b\": container with ID starting with a54e66029779dbfd311bf44279e1afb4d6dbc580ea6774a1234a082dca875e5b not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.236810 4760 scope.go:117] "RemoveContainer" containerID="b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.241518 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/0d6c1b39-d49f-49ab-bfa7-c28657529520-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.247974 4760 scope.go:117] "RemoveContainer" containerID="b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.248308 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744\": container with ID starting with b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744 not found: ID does not exist" containerID="b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.248337 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744"} err="failed to get container status \"b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744\": rpc error: code = NotFound desc = could not find container \"b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744\": container with ID starting with b6ff2eb2a2263c3682b228752c002d688c40bce168586f9c9176dbc14b0ec744 not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.248358 4760 scope.go:117] "RemoveContainer" containerID="dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.263246 4760 scope.go:117] "RemoveContainer" containerID="f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.283579 4760 scope.go:117] "RemoveContainer" containerID="aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.290115 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jfn9q"] Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.297309 4760 scope.go:117] "RemoveContainer" containerID="dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.298062 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8\": container with ID starting with dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8 not found: ID does not exist" containerID="dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.298102 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8"} err="failed to get container status \"dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8\": rpc error: code = NotFound desc = could not find container \"dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8\": container with ID starting with dc86b39c6987ee734512a67ac346fc2c6334c5e96581bdf04647965558a362f8 not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.298131 4760 scope.go:117] "RemoveContainer" containerID="f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.308307 
4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb\": container with ID starting with f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb not found: ID does not exist" containerID="f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.308714 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb"} err="failed to get container status \"f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb\": rpc error: code = NotFound desc = could not find container \"f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb\": container with ID starting with f44a1a1bde8a2421f512732ceb3537962031bc3abe1dbf95960c33bd9e8a6cfb not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.308877 4760 scope.go:117] "RemoveContainer" containerID="aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.311464 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347\": container with ID starting with aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347 not found: ID does not exist" containerID="aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.311511 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347"} err="failed to get container status \"aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347\": rpc error: code = NotFound desc = could not find container \"aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347\": container with ID starting with aa696ddb76237f485ccc6a06f8c26235da6ad0999722a20d6a11b0d430657347 not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.311540 4760 scope.go:117] "RemoveContainer" containerID="16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.333756 4760 scope.go:117] "RemoveContainer" containerID="0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.350392 4760 scope.go:117] "RemoveContainer" containerID="97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.367615 4760 scope.go:117] "RemoveContainer" containerID="16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.368852 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952\": container with ID starting with 16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952 not found: ID does not exist" containerID="16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.368889 4760 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952"} err="failed to get container status \"16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952\": rpc error: code = NotFound desc = could not find container \"16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952\": container with ID starting with 16ef54ae423720cee338fa02d039088cfbf20441a77ee01c7191600d9b4b9952 not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.368914 4760 scope.go:117] "RemoveContainer" containerID="0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.369230 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b\": container with ID starting with 0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b not found: ID does not exist" containerID="0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.369329 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b"} err="failed to get container status \"0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b\": rpc error: code = NotFound desc = could not find container \"0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b\": container with ID starting with 0579b2ed49095fc43571d02bf135434b3efcb44997eba76d1c5ea1c217f1009b not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.369432 4760 scope.go:117] "RemoveContainer" containerID="97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227" Nov 24 17:08:01 crc kubenswrapper[4760]: E1124 17:08:01.369792 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227\": container with ID starting with 97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227 not found: ID does not exist" containerID="97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.369877 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227"} err="failed to get container status \"97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227\": rpc error: code = NotFound desc = could not find container \"97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227\": container with ID starting with 97437f7adf9dfd97409c51d0796ee2aabe0652bb0813504176593d0ccfe59227 not found: ID does not exist" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.441439 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dffm4"] Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.452569 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dffm4"] Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.472188 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" path="/var/lib/kubelet/pods/0d6c1b39-d49f-49ab-bfa7-c28657529520/volumes" Nov 24 17:08:01 crc 
kubenswrapper[4760]: I1124 17:08:01.472997 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" path="/var/lib/kubelet/pods/1d5f4395-aa76-4909-9736-9f67f65b9125/volumes" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.473765 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" path="/var/lib/kubelet/pods/443db8f4-7e0f-498f-9602-c93d1086f2cb/volumes" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.474921 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82a0fb7c-7c66-41a6-9ebb-5608d47ce382" path="/var/lib/kubelet/pods/82a0fb7c-7c66-41a6-9ebb-5608d47ce382/volumes" Nov 24 17:08:01 crc kubenswrapper[4760]: I1124 17:08:01.475484 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" path="/var/lib/kubelet/pods/e3baee90-4b85-4f85-a756-67dcc7fb373a/volumes" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.153283 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" event={"ID":"3947261e-1d34-46c9-a769-f71d6e03f7d1","Type":"ContainerStarted","Data":"ec3bb9d02f65dfb02aaff7e1d9253c597355c526a6b1f007b9b448b210986efa"} Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.153661 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" event={"ID":"3947261e-1d34-46c9-a769-f71d6e03f7d1","Type":"ContainerStarted","Data":"6b1319178041cc79a80f3243e69cf9777f29ac3a0898df51ebd6edea9bc415bc"} Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.153679 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.157371 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.170234 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-jfn9q" podStartSLOduration=2.170215115 podStartE2EDuration="2.170215115s" podCreationTimestamp="2025-11-24 17:08:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:08:02.16622805 +0000 UTC m=+277.489109620" watchObservedRunningTime="2025-11-24 17:08:02.170215115 +0000 UTC m=+277.493096665" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585636 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p2cws"] Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585820 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerName="extract-utilities" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585831 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerName="extract-utilities" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585839 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585844 4760 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585853 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585860 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585870 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerName="extract-content" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585875 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerName="extract-content" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585884 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585889 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585897 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerName="extract-utilities" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585903 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerName="extract-utilities" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585911 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585916 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585924 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerName="extract-content" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585929 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerName="extract-content" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585937 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerName="extract-content" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585943 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerName="extract-content" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585954 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerName="extract-utilities" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585959 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerName="extract-utilities" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585968 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerName="extract-content" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585973 4760 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerName="extract-content" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585984 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82a0fb7c-7c66-41a6-9ebb-5608d47ce382" containerName="marketplace-operator" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.585990 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="82a0fb7c-7c66-41a6-9ebb-5608d47ce382" containerName="marketplace-operator" Nov 24 17:08:02 crc kubenswrapper[4760]: E1124 17:08:02.585997 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerName="extract-utilities" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.586003 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerName="extract-utilities" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.586094 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d5f4395-aa76-4909-9736-9f67f65b9125" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.586105 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="443db8f4-7e0f-498f-9602-c93d1086f2cb" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.586115 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="82a0fb7c-7c66-41a6-9ebb-5608d47ce382" containerName="marketplace-operator" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.586125 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3baee90-4b85-4f85-a756-67dcc7fb373a" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.586131 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d6c1b39-d49f-49ab-bfa7-c28657529520" containerName="registry-server" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.586721 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.591507 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.604710 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2cws"] Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.663969 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56c164f7-0218-4d51-af82-508b2f979a6f-utilities\") pod \"redhat-marketplace-p2cws\" (UID: \"56c164f7-0218-4d51-af82-508b2f979a6f\") " pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.664001 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56c164f7-0218-4d51-af82-508b2f979a6f-catalog-content\") pod \"redhat-marketplace-p2cws\" (UID: \"56c164f7-0218-4d51-af82-508b2f979a6f\") " pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.664040 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds5cs\" (UniqueName: \"kubernetes.io/projected/56c164f7-0218-4d51-af82-508b2f979a6f-kube-api-access-ds5cs\") pod \"redhat-marketplace-p2cws\" (UID: \"56c164f7-0218-4d51-af82-508b2f979a6f\") " pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.765709 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56c164f7-0218-4d51-af82-508b2f979a6f-utilities\") pod \"redhat-marketplace-p2cws\" (UID: \"56c164f7-0218-4d51-af82-508b2f979a6f\") " pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.765799 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56c164f7-0218-4d51-af82-508b2f979a6f-catalog-content\") pod \"redhat-marketplace-p2cws\" (UID: \"56c164f7-0218-4d51-af82-508b2f979a6f\") " pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.765892 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds5cs\" (UniqueName: \"kubernetes.io/projected/56c164f7-0218-4d51-af82-508b2f979a6f-kube-api-access-ds5cs\") pod \"redhat-marketplace-p2cws\" (UID: \"56c164f7-0218-4d51-af82-508b2f979a6f\") " pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.766316 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56c164f7-0218-4d51-af82-508b2f979a6f-utilities\") pod \"redhat-marketplace-p2cws\" (UID: \"56c164f7-0218-4d51-af82-508b2f979a6f\") " pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.766542 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56c164f7-0218-4d51-af82-508b2f979a6f-catalog-content\") pod \"redhat-marketplace-p2cws\" (UID: 
\"56c164f7-0218-4d51-af82-508b2f979a6f\") " pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.786339 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4nwzh"] Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.787506 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.792824 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.800392 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds5cs\" (UniqueName: \"kubernetes.io/projected/56c164f7-0218-4d51-af82-508b2f979a6f-kube-api-access-ds5cs\") pod \"redhat-marketplace-p2cws\" (UID: \"56c164f7-0218-4d51-af82-508b2f979a6f\") " pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.805899 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4nwzh"] Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.867364 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb9f8a49-7730-403e-bf3a-7aefc7e44b93-catalog-content\") pod \"redhat-operators-4nwzh\" (UID: \"eb9f8a49-7730-403e-bf3a-7aefc7e44b93\") " pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.867446 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb9f8a49-7730-403e-bf3a-7aefc7e44b93-utilities\") pod \"redhat-operators-4nwzh\" (UID: \"eb9f8a49-7730-403e-bf3a-7aefc7e44b93\") " pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.867535 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8mw4\" (UniqueName: \"kubernetes.io/projected/eb9f8a49-7730-403e-bf3a-7aefc7e44b93-kube-api-access-k8mw4\") pod \"redhat-operators-4nwzh\" (UID: \"eb9f8a49-7730-403e-bf3a-7aefc7e44b93\") " pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.951854 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.969090 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb9f8a49-7730-403e-bf3a-7aefc7e44b93-utilities\") pod \"redhat-operators-4nwzh\" (UID: \"eb9f8a49-7730-403e-bf3a-7aefc7e44b93\") " pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.969220 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8mw4\" (UniqueName: \"kubernetes.io/projected/eb9f8a49-7730-403e-bf3a-7aefc7e44b93-kube-api-access-k8mw4\") pod \"redhat-operators-4nwzh\" (UID: \"eb9f8a49-7730-403e-bf3a-7aefc7e44b93\") " pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.969362 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb9f8a49-7730-403e-bf3a-7aefc7e44b93-catalog-content\") pod \"redhat-operators-4nwzh\" (UID: \"eb9f8a49-7730-403e-bf3a-7aefc7e44b93\") " pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.970345 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb9f8a49-7730-403e-bf3a-7aefc7e44b93-catalog-content\") pod \"redhat-operators-4nwzh\" (UID: \"eb9f8a49-7730-403e-bf3a-7aefc7e44b93\") " pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:02 crc kubenswrapper[4760]: I1124 17:08:02.970909 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb9f8a49-7730-403e-bf3a-7aefc7e44b93-utilities\") pod \"redhat-operators-4nwzh\" (UID: \"eb9f8a49-7730-403e-bf3a-7aefc7e44b93\") " pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:03 crc kubenswrapper[4760]: I1124 17:08:03.810206 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8mw4\" (UniqueName: \"kubernetes.io/projected/eb9f8a49-7730-403e-bf3a-7aefc7e44b93-kube-api-access-k8mw4\") pod \"redhat-operators-4nwzh\" (UID: \"eb9f8a49-7730-403e-bf3a-7aefc7e44b93\") " pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:04 crc kubenswrapper[4760]: I1124 17:08:04.044347 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:04 crc kubenswrapper[4760]: I1124 17:08:04.148556 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2cws"] Nov 24 17:08:04 crc kubenswrapper[4760]: W1124 17:08:04.171798 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56c164f7_0218_4d51_af82_508b2f979a6f.slice/crio-ec94edb79dd701cf34832855c8d4aaf19cea83f9268a4ac5c84c096bc552acfd WatchSource:0}: Error finding container ec94edb79dd701cf34832855c8d4aaf19cea83f9268a4ac5c84c096bc552acfd: Status 404 returned error can't find the container with id ec94edb79dd701cf34832855c8d4aaf19cea83f9268a4ac5c84c096bc552acfd Nov 24 17:08:04 crc kubenswrapper[4760]: I1124 17:08:04.530846 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4nwzh"] Nov 24 17:08:04 crc kubenswrapper[4760]: I1124 17:08:04.986856 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-r2bbn"] Nov 24 17:08:04 crc kubenswrapper[4760]: I1124 17:08:04.988335 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:04 crc kubenswrapper[4760]: I1124 17:08:04.991913 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.037034 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r2bbn"] Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.105922 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b10118dc-03fc-435c-8510-00a210c546a4-utilities\") pod \"community-operators-r2bbn\" (UID: \"b10118dc-03fc-435c-8510-00a210c546a4\") " pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.106037 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4w4m\" (UniqueName: \"kubernetes.io/projected/b10118dc-03fc-435c-8510-00a210c546a4-kube-api-access-r4w4m\") pod \"community-operators-r2bbn\" (UID: \"b10118dc-03fc-435c-8510-00a210c546a4\") " pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.106107 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b10118dc-03fc-435c-8510-00a210c546a4-catalog-content\") pod \"community-operators-r2bbn\" (UID: \"b10118dc-03fc-435c-8510-00a210c546a4\") " pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.183775 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2kfvr"] Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.183811 4760 generic.go:334] "Generic (PLEG): container finished" podID="eb9f8a49-7730-403e-bf3a-7aefc7e44b93" containerID="5a4a6304ec9c23b611f57827ddde99ad136af6e49dc4604c9d97d54a301c0e01" exitCode=0 Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.185149 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nwzh" 
event={"ID":"eb9f8a49-7730-403e-bf3a-7aefc7e44b93","Type":"ContainerDied","Data":"5a4a6304ec9c23b611f57827ddde99ad136af6e49dc4604c9d97d54a301c0e01"} Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.185192 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nwzh" event={"ID":"eb9f8a49-7730-403e-bf3a-7aefc7e44b93","Type":"ContainerStarted","Data":"677a3c8db8f8cbcda1cd4940b515f537a3ce8292ee70403d829a594a778931af"} Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.185290 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.187343 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.189219 4760 generic.go:334] "Generic (PLEG): container finished" podID="56c164f7-0218-4d51-af82-508b2f979a6f" containerID="7059e50c0c7d1a31e608aa01edd94710af2ff9a596570231bf1ed33dea9e1b0a" exitCode=0 Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.189250 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2cws" event={"ID":"56c164f7-0218-4d51-af82-508b2f979a6f","Type":"ContainerDied","Data":"7059e50c0c7d1a31e608aa01edd94710af2ff9a596570231bf1ed33dea9e1b0a"} Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.189274 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2cws" event={"ID":"56c164f7-0218-4d51-af82-508b2f979a6f","Type":"ContainerStarted","Data":"ec94edb79dd701cf34832855c8d4aaf19cea83f9268a4ac5c84c096bc552acfd"} Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.199400 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2kfvr"] Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.207663 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b10118dc-03fc-435c-8510-00a210c546a4-utilities\") pod \"community-operators-r2bbn\" (UID: \"b10118dc-03fc-435c-8510-00a210c546a4\") " pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.207709 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e811c73-6a8f-42b7-9a9c-4a062f6313cb-catalog-content\") pod \"certified-operators-2kfvr\" (UID: \"3e811c73-6a8f-42b7-9a9c-4a062f6313cb\") " pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.207742 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4w4m\" (UniqueName: \"kubernetes.io/projected/b10118dc-03fc-435c-8510-00a210c546a4-kube-api-access-r4w4m\") pod \"community-operators-r2bbn\" (UID: \"b10118dc-03fc-435c-8510-00a210c546a4\") " pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.207767 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b10118dc-03fc-435c-8510-00a210c546a4-catalog-content\") pod \"community-operators-r2bbn\" (UID: \"b10118dc-03fc-435c-8510-00a210c546a4\") " pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:05 crc 
kubenswrapper[4760]: I1124 17:08:05.207797 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7s9qf\" (UniqueName: \"kubernetes.io/projected/3e811c73-6a8f-42b7-9a9c-4a062f6313cb-kube-api-access-7s9qf\") pod \"certified-operators-2kfvr\" (UID: \"3e811c73-6a8f-42b7-9a9c-4a062f6313cb\") " pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.207828 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e811c73-6a8f-42b7-9a9c-4a062f6313cb-utilities\") pod \"certified-operators-2kfvr\" (UID: \"3e811c73-6a8f-42b7-9a9c-4a062f6313cb\") " pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.208187 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b10118dc-03fc-435c-8510-00a210c546a4-utilities\") pod \"community-operators-r2bbn\" (UID: \"b10118dc-03fc-435c-8510-00a210c546a4\") " pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.208397 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b10118dc-03fc-435c-8510-00a210c546a4-catalog-content\") pod \"community-operators-r2bbn\" (UID: \"b10118dc-03fc-435c-8510-00a210c546a4\") " pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.236200 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4w4m\" (UniqueName: \"kubernetes.io/projected/b10118dc-03fc-435c-8510-00a210c546a4-kube-api-access-r4w4m\") pod \"community-operators-r2bbn\" (UID: \"b10118dc-03fc-435c-8510-00a210c546a4\") " pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.308675 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7s9qf\" (UniqueName: \"kubernetes.io/projected/3e811c73-6a8f-42b7-9a9c-4a062f6313cb-kube-api-access-7s9qf\") pod \"certified-operators-2kfvr\" (UID: \"3e811c73-6a8f-42b7-9a9c-4a062f6313cb\") " pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.308776 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e811c73-6a8f-42b7-9a9c-4a062f6313cb-utilities\") pod \"certified-operators-2kfvr\" (UID: \"3e811c73-6a8f-42b7-9a9c-4a062f6313cb\") " pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.308853 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e811c73-6a8f-42b7-9a9c-4a062f6313cb-catalog-content\") pod \"certified-operators-2kfvr\" (UID: \"3e811c73-6a8f-42b7-9a9c-4a062f6313cb\") " pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.309282 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e811c73-6a8f-42b7-9a9c-4a062f6313cb-utilities\") pod \"certified-operators-2kfvr\" (UID: \"3e811c73-6a8f-42b7-9a9c-4a062f6313cb\") " pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 
17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.309559 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e811c73-6a8f-42b7-9a9c-4a062f6313cb-catalog-content\") pod \"certified-operators-2kfvr\" (UID: \"3e811c73-6a8f-42b7-9a9c-4a062f6313cb\") " pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.325201 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7s9qf\" (UniqueName: \"kubernetes.io/projected/3e811c73-6a8f-42b7-9a9c-4a062f6313cb-kube-api-access-7s9qf\") pod \"certified-operators-2kfvr\" (UID: \"3e811c73-6a8f-42b7-9a9c-4a062f6313cb\") " pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.352592 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.506544 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.739585 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r2bbn"] Nov 24 17:08:05 crc kubenswrapper[4760]: W1124 17:08:05.745912 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb10118dc_03fc_435c_8510_00a210c546a4.slice/crio-6134cf9a94265bd1e5b6d0a3a1616b17ce78cbc0cc14b428271a7c5d7d488c52 WatchSource:0}: Error finding container 6134cf9a94265bd1e5b6d0a3a1616b17ce78cbc0cc14b428271a7c5d7d488c52: Status 404 returned error can't find the container with id 6134cf9a94265bd1e5b6d0a3a1616b17ce78cbc0cc14b428271a7c5d7d488c52 Nov 24 17:08:05 crc kubenswrapper[4760]: I1124 17:08:05.915463 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2kfvr"] Nov 24 17:08:06 crc kubenswrapper[4760]: I1124 17:08:06.196498 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nwzh" event={"ID":"eb9f8a49-7730-403e-bf3a-7aefc7e44b93","Type":"ContainerStarted","Data":"389eb43f262d9c5004ecce1e2cb477fe6666a2406ad29d7700716e07273569c3"} Nov 24 17:08:06 crc kubenswrapper[4760]: I1124 17:08:06.198542 4760 generic.go:334] "Generic (PLEG): container finished" podID="3e811c73-6a8f-42b7-9a9c-4a062f6313cb" containerID="bac9cdfce34e1cb417491a6aa1f4b7d91029702bb625c3663d86263319aa9bb3" exitCode=0 Nov 24 17:08:06 crc kubenswrapper[4760]: I1124 17:08:06.198618 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2kfvr" event={"ID":"3e811c73-6a8f-42b7-9a9c-4a062f6313cb","Type":"ContainerDied","Data":"bac9cdfce34e1cb417491a6aa1f4b7d91029702bb625c3663d86263319aa9bb3"} Nov 24 17:08:06 crc kubenswrapper[4760]: I1124 17:08:06.199017 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2kfvr" event={"ID":"3e811c73-6a8f-42b7-9a9c-4a062f6313cb","Type":"ContainerStarted","Data":"224967230f1ead49051b037bdd185e28a9a12843b6708c19cf8ab688fe86e02c"} Nov 24 17:08:06 crc kubenswrapper[4760]: I1124 17:08:06.201027 4760 generic.go:334] "Generic (PLEG): container finished" podID="b10118dc-03fc-435c-8510-00a210c546a4" containerID="c2b710e21d5c225cbd2b59c95ae7a91ec7733d44fb5c2c522356fb9739fc335f" exitCode=0 Nov 24 17:08:06 crc 
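
Each of the four marketplace catalog pods above follows the same shape: two emptyDir volumes (utilities and catalog-content) plus a projected kube-api-access token are mounted, short-lived extract containers run to completion (the "container finished" / ContainerDied events with exitCode=0), and only then does the long-running registry-server container start. A minimal Go sketch of that pod shape against k8s.io/api/core/v1 follows; treating the extract steps as init containers matches the container names in this log, but the image names and mount paths are placeholders, not values taken from the log.

    package catalogsketch

    import (
    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // catalogPod sketches the pod shape visible in the log: two emptyDir
    // volumes shared by short-lived extract containers and the long-running
    // registry-server. Images and mount paths are placeholders.
    func catalogPod() *corev1.Pod {
    	emptyDir := func(name string) corev1.Volume {
    		return corev1.Volume{
    			Name:         name,
    			VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
    		}
    	}
    	mounts := []corev1.VolumeMount{
    		{Name: "utilities", MountPath: "/utilities"},               // placeholder path
    		{Name: "catalog-content", MountPath: "/extracted-catalog"}, // placeholder path
    	}
    	return &corev1.Pod{
    		ObjectMeta: metav1.ObjectMeta{Name: "redhat-marketplace-p2cws", Namespace: "openshift-marketplace"},
    		Spec: corev1.PodSpec{
    			Volumes: []corev1.Volume{emptyDir("utilities"), emptyDir("catalog-content")},
    			InitContainers: []corev1.Container{
    				{Name: "extract-utilities", Image: "placeholder/utilities:latest", VolumeMounts: mounts},
    				{Name: "extract-content", Image: "placeholder/catalog:latest", VolumeMounts: mounts},
    			},
    			Containers: []corev1.Container{
    				{Name: "registry-server", Image: "placeholder/catalog:latest", VolumeMounts: mounts},
    			},
    		},
    	}
    }
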
kubenswrapper[4760]: I1124 17:08:06.201114 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2bbn" event={"ID":"b10118dc-03fc-435c-8510-00a210c546a4","Type":"ContainerDied","Data":"c2b710e21d5c225cbd2b59c95ae7a91ec7733d44fb5c2c522356fb9739fc335f"} Nov 24 17:08:06 crc kubenswrapper[4760]: I1124 17:08:06.201139 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2bbn" event={"ID":"b10118dc-03fc-435c-8510-00a210c546a4","Type":"ContainerStarted","Data":"6134cf9a94265bd1e5b6d0a3a1616b17ce78cbc0cc14b428271a7c5d7d488c52"} Nov 24 17:08:06 crc kubenswrapper[4760]: I1124 17:08:06.204123 4760 generic.go:334] "Generic (PLEG): container finished" podID="56c164f7-0218-4d51-af82-508b2f979a6f" containerID="9aacb29e5eabcd2548dca4ef4c899f21ac74c08f0fbf185cf52aa95270872647" exitCode=0 Nov 24 17:08:06 crc kubenswrapper[4760]: I1124 17:08:06.204182 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2cws" event={"ID":"56c164f7-0218-4d51-af82-508b2f979a6f","Type":"ContainerDied","Data":"9aacb29e5eabcd2548dca4ef4c899f21ac74c08f0fbf185cf52aa95270872647"} Nov 24 17:08:07 crc kubenswrapper[4760]: I1124 17:08:07.213581 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2cws" event={"ID":"56c164f7-0218-4d51-af82-508b2f979a6f","Type":"ContainerStarted","Data":"1282da69a31a6734f4ce84d74ffccb086afa59d57b7a9c5cea01c9298ca92e54"} Nov 24 17:08:07 crc kubenswrapper[4760]: I1124 17:08:07.219068 4760 generic.go:334] "Generic (PLEG): container finished" podID="eb9f8a49-7730-403e-bf3a-7aefc7e44b93" containerID="389eb43f262d9c5004ecce1e2cb477fe6666a2406ad29d7700716e07273569c3" exitCode=0 Nov 24 17:08:07 crc kubenswrapper[4760]: I1124 17:08:07.219267 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nwzh" event={"ID":"eb9f8a49-7730-403e-bf3a-7aefc7e44b93","Type":"ContainerDied","Data":"389eb43f262d9c5004ecce1e2cb477fe6666a2406ad29d7700716e07273569c3"} Nov 24 17:08:07 crc kubenswrapper[4760]: I1124 17:08:07.225062 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2bbn" event={"ID":"b10118dc-03fc-435c-8510-00a210c546a4","Type":"ContainerStarted","Data":"8fc25e955decd6b19e19ee4d586d60ec2072e84aa3451290f2d6c975ff87800d"} Nov 24 17:08:07 crc kubenswrapper[4760]: I1124 17:08:07.236346 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p2cws" podStartSLOduration=3.808679342 podStartE2EDuration="5.236331873s" podCreationTimestamp="2025-11-24 17:08:02 +0000 UTC" firstStartedPulling="2025-11-24 17:08:05.191933794 +0000 UTC m=+280.514815374" lastFinishedPulling="2025-11-24 17:08:06.619586355 +0000 UTC m=+281.942467905" observedRunningTime="2025-11-24 17:08:07.236280301 +0000 UTC m=+282.559161861" watchObservedRunningTime="2025-11-24 17:08:07.236331873 +0000 UTC m=+282.559213423" Nov 24 17:08:08 crc kubenswrapper[4760]: I1124 17:08:08.233268 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nwzh" event={"ID":"eb9f8a49-7730-403e-bf3a-7aefc7e44b93","Type":"ContainerStarted","Data":"b6dd7e0d0dceec33856cb042061fa3f90cbed61eb08d6200c454f0be1ea59006"} Nov 24 17:08:08 crc kubenswrapper[4760]: I1124 17:08:08.235342 4760 generic.go:334] "Generic (PLEG): container finished" podID="3e811c73-6a8f-42b7-9a9c-4a062f6313cb" 
containerID="0ee2d868b094ec1d2dfd0ec58cb7424916455217088a7cba999af6da7aa0fcbc" exitCode=0 Nov 24 17:08:08 crc kubenswrapper[4760]: I1124 17:08:08.235413 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2kfvr" event={"ID":"3e811c73-6a8f-42b7-9a9c-4a062f6313cb","Type":"ContainerDied","Data":"0ee2d868b094ec1d2dfd0ec58cb7424916455217088a7cba999af6da7aa0fcbc"} Nov 24 17:08:08 crc kubenswrapper[4760]: I1124 17:08:08.238298 4760 generic.go:334] "Generic (PLEG): container finished" podID="b10118dc-03fc-435c-8510-00a210c546a4" containerID="8fc25e955decd6b19e19ee4d586d60ec2072e84aa3451290f2d6c975ff87800d" exitCode=0 Nov 24 17:08:08 crc kubenswrapper[4760]: I1124 17:08:08.240161 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2bbn" event={"ID":"b10118dc-03fc-435c-8510-00a210c546a4","Type":"ContainerDied","Data":"8fc25e955decd6b19e19ee4d586d60ec2072e84aa3451290f2d6c975ff87800d"} Nov 24 17:08:08 crc kubenswrapper[4760]: I1124 17:08:08.255901 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4nwzh" podStartSLOduration=3.822571415 podStartE2EDuration="6.255855919s" podCreationTimestamp="2025-11-24 17:08:02 +0000 UTC" firstStartedPulling="2025-11-24 17:08:05.186640281 +0000 UTC m=+280.509521831" lastFinishedPulling="2025-11-24 17:08:07.619924785 +0000 UTC m=+282.942806335" observedRunningTime="2025-11-24 17:08:08.252158641 +0000 UTC m=+283.575040191" watchObservedRunningTime="2025-11-24 17:08:08.255855919 +0000 UTC m=+283.578737479" Nov 24 17:08:09 crc kubenswrapper[4760]: I1124 17:08:09.248818 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2kfvr" event={"ID":"3e811c73-6a8f-42b7-9a9c-4a062f6313cb","Type":"ContainerStarted","Data":"a6c4efbd948923341d35aca6561fc5f16aaea692241d9490eb962a0b41e49d2d"} Nov 24 17:08:09 crc kubenswrapper[4760]: I1124 17:08:09.252152 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2bbn" event={"ID":"b10118dc-03fc-435c-8510-00a210c546a4","Type":"ContainerStarted","Data":"6d26547694e60910dcf33edacf5e5ca67c44a926a5db89de06f27849daca16c5"} Nov 24 17:08:09 crc kubenswrapper[4760]: I1124 17:08:09.270731 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2kfvr" podStartSLOduration=1.5794856510000002 podStartE2EDuration="4.27071243s" podCreationTimestamp="2025-11-24 17:08:05 +0000 UTC" firstStartedPulling="2025-11-24 17:08:06.201928604 +0000 UTC m=+281.524810144" lastFinishedPulling="2025-11-24 17:08:08.893155363 +0000 UTC m=+284.216036923" observedRunningTime="2025-11-24 17:08:09.269409232 +0000 UTC m=+284.592290782" watchObservedRunningTime="2025-11-24 17:08:09.27071243 +0000 UTC m=+284.593593980" Nov 24 17:08:09 crc kubenswrapper[4760]: I1124 17:08:09.285456 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-r2bbn" podStartSLOduration=2.872484954 podStartE2EDuration="5.285436657s" podCreationTimestamp="2025-11-24 17:08:04 +0000 UTC" firstStartedPulling="2025-11-24 17:08:06.203442278 +0000 UTC m=+281.526323828" lastFinishedPulling="2025-11-24 17:08:08.616393971 +0000 UTC m=+283.939275531" observedRunningTime="2025-11-24 17:08:09.283560393 +0000 UTC m=+284.606441963" watchObservedRunningTime="2025-11-24 17:08:09.285436657 +0000 UTC m=+284.608318217" Nov 24 17:08:12 crc 
kubenswrapper[4760]: I1124 17:08:12.952795 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:12 crc kubenswrapper[4760]: I1124 17:08:12.953463 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:13 crc kubenswrapper[4760]: I1124 17:08:13.027175 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:13 crc kubenswrapper[4760]: I1124 17:08:13.335520 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p2cws" Nov 24 17:08:14 crc kubenswrapper[4760]: I1124 17:08:14.045198 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:14 crc kubenswrapper[4760]: I1124 17:08:14.047165 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:14 crc kubenswrapper[4760]: I1124 17:08:14.104347 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:14 crc kubenswrapper[4760]: I1124 17:08:14.341910 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4nwzh" Nov 24 17:08:15 crc kubenswrapper[4760]: I1124 17:08:15.353347 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:15 crc kubenswrapper[4760]: I1124 17:08:15.353435 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:15 crc kubenswrapper[4760]: I1124 17:08:15.421233 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:15 crc kubenswrapper[4760]: I1124 17:08:15.507513 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:15 crc kubenswrapper[4760]: I1124 17:08:15.507832 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:15 crc kubenswrapper[4760]: I1124 17:08:15.571543 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:16 crc kubenswrapper[4760]: I1124 17:08:16.357404 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2kfvr" Nov 24 17:08:16 crc kubenswrapper[4760]: I1124 17:08:16.357871 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-r2bbn" Nov 24 17:08:25 crc kubenswrapper[4760]: I1124 17:08:25.212939 4760 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Nov 24 17:09:35 crc kubenswrapper[4760]: I1124 17:09:35.642334 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 
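
The machine-config-daemon liveness failures that follow recur every 30 seconds (17:09:35, 17:10:05, 17:10:35) until the kubelet kills and restarts the container, which is consistent with an HTTP liveness probe against 127.0.0.1:8798/health probed at a 30s period with a failure threshold of 3. A sketch of that probe shape using k8s.io/api/core/v1; the period and threshold are inferences from the timing above, not values printed in the log.

    package probesketch

    import (
    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    // livenessProbe reconstructs the probe shape implied by the failure output:
    // an HTTP GET against 127.0.0.1:8798/health. PeriodSeconds matches the 30s
    // cadence of the failures in the log; FailureThreshold is an assumption.
    var livenessProbe = &corev1.Probe{
    	ProbeHandler: corev1.ProbeHandler{
    		HTTPGet: &corev1.HTTPGetAction{
    			Host: "127.0.0.1",
    			Path: "/health",
    			Port: intstr.FromInt(8798),
    		},
    	},
    	PeriodSeconds:    30,
    	FailureThreshold: 3,
    }
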
17:09:35 crc kubenswrapper[4760]: I1124 17:09:35.643077 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.050943 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-mvgqq"] Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.052256 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.065834 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-mvgqq"] Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.114405 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.114458 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-ca-trust-extracted\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.114489 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlnp4\" (UniqueName: \"kubernetes.io/projected/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-kube-api-access-qlnp4\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.114531 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-trusted-ca\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.114551 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-registry-certificates\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.114567 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-bound-sa-token\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.114677 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-installation-pull-secrets\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.114793 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-registry-tls\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.138818 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.215900 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-trusted-ca\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.215940 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-registry-certificates\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.215958 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-bound-sa-token\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.215989 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-installation-pull-secrets\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.216067 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-registry-tls\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.216116 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-ca-trust-extracted\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.216150 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlnp4\" (UniqueName: \"kubernetes.io/projected/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-kube-api-access-qlnp4\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.217423 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-registry-certificates\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.217744 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-ca-trust-extracted\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.217910 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-trusted-ca\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.222498 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-registry-tls\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.223339 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-installation-pull-secrets\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.230806 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlnp4\" (UniqueName: \"kubernetes.io/projected/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-kube-api-access-qlnp4\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.234414 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e7eefe86-a6d0-48cd-9b94-c6fcc27f757c-bound-sa-token\") pod \"image-registry-66df7c8f76-mvgqq\" (UID: \"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.374247 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.661926 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-mvgqq"] Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.980164 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" event={"ID":"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c","Type":"ContainerStarted","Data":"47f80e1c87eb8021a95e4f1238e3c71bbcb536206752f3bd3aa33a65d6ff184a"} Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.980240 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" event={"ID":"e7eefe86-a6d0-48cd-9b94-c6fcc27f757c","Type":"ContainerStarted","Data":"11846415b741d1372d3d399aa955317a404e952548ab3bc317293111d46b4b63"} Nov 24 17:09:57 crc kubenswrapper[4760]: I1124 17:09:57.980406 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:09:58 crc kubenswrapper[4760]: I1124 17:09:58.009327 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" podStartSLOduration=1.009295668 podStartE2EDuration="1.009295668s" podCreationTimestamp="2025-11-24 17:09:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:09:58.004345513 +0000 UTC m=+393.327227063" watchObservedRunningTime="2025-11-24 17:09:58.009295668 +0000 UTC m=+393.332177258" Nov 24 17:10:05 crc kubenswrapper[4760]: I1124 17:10:05.643066 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:10:05 crc kubenswrapper[4760]: I1124 17:10:05.643960 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:10:17 crc kubenswrapper[4760]: I1124 17:10:17.383726 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-mvgqq" Nov 24 17:10:17 crc kubenswrapper[4760]: I1124 17:10:17.464444 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2qfzh"] Nov 24 17:10:35 crc kubenswrapper[4760]: I1124 17:10:35.642450 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:10:35 crc kubenswrapper[4760]: I1124 17:10:35.643329 4760 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:10:35 crc kubenswrapper[4760]: I1124 17:10:35.643426 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:10:35 crc kubenswrapper[4760]: I1124 17:10:35.644255 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c890ab30decc89bd18031b40b32e3fbedd7cc15c8392d95c3f21ddab1b02a8fb"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:10:35 crc kubenswrapper[4760]: I1124 17:10:35.644360 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://c890ab30decc89bd18031b40b32e3fbedd7cc15c8392d95c3f21ddab1b02a8fb" gracePeriod=600 Nov 24 17:10:36 crc kubenswrapper[4760]: I1124 17:10:36.251890 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="c890ab30decc89bd18031b40b32e3fbedd7cc15c8392d95c3f21ddab1b02a8fb" exitCode=0 Nov 24 17:10:36 crc kubenswrapper[4760]: I1124 17:10:36.252055 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"c890ab30decc89bd18031b40b32e3fbedd7cc15c8392d95c3f21ddab1b02a8fb"} Nov 24 17:10:36 crc kubenswrapper[4760]: I1124 17:10:36.252470 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"28aa4a21b3828caf19f86ef80042f17ff82b2d6bbe8b627e35198893af6325e3"} Nov 24 17:10:36 crc kubenswrapper[4760]: I1124 17:10:36.252533 4760 scope.go:117] "RemoveContainer" containerID="61faa0491fbc6c5becd3aefcdcd27858d4f14134f77c17b311ff9815640e5696" Nov 24 17:10:42 crc kubenswrapper[4760]: I1124 17:10:42.520208 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" podUID="28fd1340-959d-4b58-8ad7-c654176844e2" containerName="registry" containerID="cri-o://24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e" gracePeriod=30 Nov 24 17:10:42 crc kubenswrapper[4760]: I1124 17:10:42.932355 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.026272 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccgqj\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-kube-api-access-ccgqj\") pod \"28fd1340-959d-4b58-8ad7-c654176844e2\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.026334 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/28fd1340-959d-4b58-8ad7-c654176844e2-ca-trust-extracted\") pod \"28fd1340-959d-4b58-8ad7-c654176844e2\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.026358 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-registry-tls\") pod \"28fd1340-959d-4b58-8ad7-c654176844e2\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.026382 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-registry-certificates\") pod \"28fd1340-959d-4b58-8ad7-c654176844e2\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.026434 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-bound-sa-token\") pod \"28fd1340-959d-4b58-8ad7-c654176844e2\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.026489 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/28fd1340-959d-4b58-8ad7-c654176844e2-installation-pull-secrets\") pod \"28fd1340-959d-4b58-8ad7-c654176844e2\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.026620 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"28fd1340-959d-4b58-8ad7-c654176844e2\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.026651 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-trusted-ca\") pod \"28fd1340-959d-4b58-8ad7-c654176844e2\" (UID: \"28fd1340-959d-4b58-8ad7-c654176844e2\") " Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.027499 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "28fd1340-959d-4b58-8ad7-c654176844e2" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.028564 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "28fd1340-959d-4b58-8ad7-c654176844e2" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.035205 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28fd1340-959d-4b58-8ad7-c654176844e2-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "28fd1340-959d-4b58-8ad7-c654176844e2" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.035441 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "28fd1340-959d-4b58-8ad7-c654176844e2" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.035487 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "28fd1340-959d-4b58-8ad7-c654176844e2" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.038306 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-kube-api-access-ccgqj" (OuterVolumeSpecName: "kube-api-access-ccgqj") pod "28fd1340-959d-4b58-8ad7-c654176844e2" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2"). InnerVolumeSpecName "kube-api-access-ccgqj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.038738 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "28fd1340-959d-4b58-8ad7-c654176844e2" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.043170 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28fd1340-959d-4b58-8ad7-c654176844e2-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "28fd1340-959d-4b58-8ad7-c654176844e2" (UID: "28fd1340-959d-4b58-8ad7-c654176844e2"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.128217 4760 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/28fd1340-959d-4b58-8ad7-c654176844e2-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.128250 4760 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.128261 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccgqj\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-kube-api-access-ccgqj\") on node \"crc\" DevicePath \"\"" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.128270 4760 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/28fd1340-959d-4b58-8ad7-c654176844e2-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.128278 4760 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.128286 4760 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/28fd1340-959d-4b58-8ad7-c654176844e2-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.128295 4760 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28fd1340-959d-4b58-8ad7-c654176844e2-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.310967 4760 generic.go:334] "Generic (PLEG): container finished" podID="28fd1340-959d-4b58-8ad7-c654176844e2" containerID="24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e" exitCode=0 Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.311076 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.311076 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" event={"ID":"28fd1340-959d-4b58-8ad7-c654176844e2","Type":"ContainerDied","Data":"24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e"} Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.311191 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2qfzh" event={"ID":"28fd1340-959d-4b58-8ad7-c654176844e2","Type":"ContainerDied","Data":"bf5bdd691afa594d270f670922b235fecc11fe251c82cc271ad4f91ad2ea67b1"} Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.311257 4760 scope.go:117] "RemoveContainer" containerID="24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.337363 4760 scope.go:117] "RemoveContainer" containerID="24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e" Nov 24 17:10:43 crc kubenswrapper[4760]: E1124 17:10:43.338099 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e\": container with ID starting with 24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e not found: ID does not exist" containerID="24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.338162 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e"} err="failed to get container status \"24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e\": rpc error: code = NotFound desc = could not find container \"24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e\": container with ID starting with 24f4d590954051e584b152191fb276031ce78cd7a0ef1c3cb88ec3df5af2ad0e not found: ID does not exist" Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.355661 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2qfzh"] Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.363998 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2qfzh"] Nov 24 17:10:43 crc kubenswrapper[4760]: I1124 17:10:43.476910 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28fd1340-959d-4b58-8ad7-c654176844e2" path="/var/lib/kubelet/pods/28fd1340-959d-4b58-8ad7-c654176844e2/volumes" Nov 24 17:12:35 crc kubenswrapper[4760]: I1124 17:12:35.643490 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:12:35 crc kubenswrapper[4760]: I1124 17:12:35.644357 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:13:05 crc 
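
The RemoveContainer sequence above ends with a gRPC NotFound ("ID does not exist") because CRI-O has already deleted the container by the time the kubelet asks for its status; the kubelet logs the error and carries on, since the container being gone is the desired end state, and then cleans up the orphaned pod volume directory. A small Go sketch of that idempotent-delete pattern using google.golang.org/grpc/status; the helper name is invented for illustration.

    package crisketch

    import (
    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // ignoreNotFound treats a gRPC NotFound from the container runtime as
    // success: the object is already gone, which is what the caller wanted.
    // This mirrors the benign DeleteContainer/ContainerStatus errors above.
    func ignoreNotFound(err error) error {
    	if status.Code(err) == codes.NotFound {
    		return nil
    	}
    	return err
    }
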
kubenswrapper[4760]: I1124 17:13:05.643146 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:13:05 crc kubenswrapper[4760]: I1124 17:13:05.643989 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.554364 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-qn2xj"] Nov 24 17:13:25 crc kubenswrapper[4760]: E1124 17:13:25.555155 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28fd1340-959d-4b58-8ad7-c654176844e2" containerName="registry" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.555177 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="28fd1340-959d-4b58-8ad7-c654176844e2" containerName="registry" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.555386 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="28fd1340-959d-4b58-8ad7-c654176844e2" containerName="registry" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.556034 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-qn2xj" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.559031 4760 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-86kbl" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.559381 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.563323 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.564757 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-xtps2"] Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.565566 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-xtps2" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.569790 4760 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-vrlrd" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.569848 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-qn2xj"] Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.574066 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-xtps2"] Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.585139 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-dcdmq"] Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.585916 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-dcdmq" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.589393 4760 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-r7j76" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.592351 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-dcdmq"] Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.705452 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzh7g\" (UniqueName: \"kubernetes.io/projected/e64b9328-142b-47be-a2f9-9c2339244683-kube-api-access-rzh7g\") pod \"cert-manager-cainjector-7f985d654d-qn2xj\" (UID: \"e64b9328-142b-47be-a2f9-9c2339244683\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-qn2xj" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.705514 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56z2h\" (UniqueName: \"kubernetes.io/projected/bc25f619-5720-41cd-9fe6-beb030debe00-kube-api-access-56z2h\") pod \"cert-manager-5b446d88c5-xtps2\" (UID: \"bc25f619-5720-41cd-9fe6-beb030debe00\") " pod="cert-manager/cert-manager-5b446d88c5-xtps2" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.705542 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mjrm\" (UniqueName: \"kubernetes.io/projected/32a75cd6-7dcc-409b-9208-86578c121ec7-kube-api-access-4mjrm\") pod \"cert-manager-webhook-5655c58dd6-dcdmq\" (UID: \"32a75cd6-7dcc-409b-9208-86578c121ec7\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-dcdmq" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.806491 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzh7g\" (UniqueName: \"kubernetes.io/projected/e64b9328-142b-47be-a2f9-9c2339244683-kube-api-access-rzh7g\") pod \"cert-manager-cainjector-7f985d654d-qn2xj\" (UID: \"e64b9328-142b-47be-a2f9-9c2339244683\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-qn2xj" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.806556 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56z2h\" (UniqueName: \"kubernetes.io/projected/bc25f619-5720-41cd-9fe6-beb030debe00-kube-api-access-56z2h\") pod \"cert-manager-5b446d88c5-xtps2\" (UID: \"bc25f619-5720-41cd-9fe6-beb030debe00\") " pod="cert-manager/cert-manager-5b446d88c5-xtps2" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.806582 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mjrm\" (UniqueName: \"kubernetes.io/projected/32a75cd6-7dcc-409b-9208-86578c121ec7-kube-api-access-4mjrm\") pod \"cert-manager-webhook-5655c58dd6-dcdmq\" (UID: \"32a75cd6-7dcc-409b-9208-86578c121ec7\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-dcdmq" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.829713 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mjrm\" (UniqueName: \"kubernetes.io/projected/32a75cd6-7dcc-409b-9208-86578c121ec7-kube-api-access-4mjrm\") pod \"cert-manager-webhook-5655c58dd6-dcdmq\" (UID: \"32a75cd6-7dcc-409b-9208-86578c121ec7\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-dcdmq" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.829968 4760 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-rzh7g\" (UniqueName: \"kubernetes.io/projected/e64b9328-142b-47be-a2f9-9c2339244683-kube-api-access-rzh7g\") pod \"cert-manager-cainjector-7f985d654d-qn2xj\" (UID: \"e64b9328-142b-47be-a2f9-9c2339244683\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-qn2xj" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.834860 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56z2h\" (UniqueName: \"kubernetes.io/projected/bc25f619-5720-41cd-9fe6-beb030debe00-kube-api-access-56z2h\") pod \"cert-manager-5b446d88c5-xtps2\" (UID: \"bc25f619-5720-41cd-9fe6-beb030debe00\") " pod="cert-manager/cert-manager-5b446d88c5-xtps2" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.873051 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-qn2xj" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.883506 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-xtps2" Nov 24 17:13:25 crc kubenswrapper[4760]: I1124 17:13:25.900766 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-dcdmq" Nov 24 17:13:26 crc kubenswrapper[4760]: I1124 17:13:26.136095 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-xtps2"] Nov 24 17:13:26 crc kubenswrapper[4760]: I1124 17:13:26.143735 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:13:26 crc kubenswrapper[4760]: I1124 17:13:26.167545 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-qn2xj"] Nov 24 17:13:26 crc kubenswrapper[4760]: W1124 17:13:26.173474 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode64b9328_142b_47be_a2f9_9c2339244683.slice/crio-1a586ccb677981914855215c32db23a3497a2a0c4b2761c2c228e1e714011001 WatchSource:0}: Error finding container 1a586ccb677981914855215c32db23a3497a2a0c4b2761c2c228e1e714011001: Status 404 returned error can't find the container with id 1a586ccb677981914855215c32db23a3497a2a0c4b2761c2c228e1e714011001 Nov 24 17:13:26 crc kubenswrapper[4760]: I1124 17:13:26.373906 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-dcdmq"] Nov 24 17:13:26 crc kubenswrapper[4760]: W1124 17:13:26.384196 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod32a75cd6_7dcc_409b_9208_86578c121ec7.slice/crio-6dd4b39a6e610f6a7e87623869a4977e5785944416992b5296b59aa75e99d23c WatchSource:0}: Error finding container 6dd4b39a6e610f6a7e87623869a4977e5785944416992b5296b59aa75e99d23c: Status 404 returned error can't find the container with id 6dd4b39a6e610f6a7e87623869a4977e5785944416992b5296b59aa75e99d23c Nov 24 17:13:26 crc kubenswrapper[4760]: I1124 17:13:26.402050 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-qn2xj" event={"ID":"e64b9328-142b-47be-a2f9-9c2339244683","Type":"ContainerStarted","Data":"1a586ccb677981914855215c32db23a3497a2a0c4b2761c2c228e1e714011001"} Nov 24 17:13:26 crc kubenswrapper[4760]: I1124 17:13:26.403130 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="cert-manager/cert-manager-webhook-5655c58dd6-dcdmq" event={"ID":"32a75cd6-7dcc-409b-9208-86578c121ec7","Type":"ContainerStarted","Data":"6dd4b39a6e610f6a7e87623869a4977e5785944416992b5296b59aa75e99d23c"} Nov 24 17:13:26 crc kubenswrapper[4760]: I1124 17:13:26.406094 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-xtps2" event={"ID":"bc25f619-5720-41cd-9fe6-beb030debe00","Type":"ContainerStarted","Data":"0f273b239a08809d656a3515877dc489b27029a602f7249676b7ad5eef525bb0"} Nov 24 17:13:30 crc kubenswrapper[4760]: I1124 17:13:30.429681 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-qn2xj" event={"ID":"e64b9328-142b-47be-a2f9-9c2339244683","Type":"ContainerStarted","Data":"40f05136a17883990fc17b2ba877a8e176a9480433d0dc23e31e44a20afd501e"} Nov 24 17:13:30 crc kubenswrapper[4760]: I1124 17:13:30.432158 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-dcdmq" event={"ID":"32a75cd6-7dcc-409b-9208-86578c121ec7","Type":"ContainerStarted","Data":"200f671b25027b1639d4532d5762a1185ab311494c69b1e4e7901b43a20b1b83"} Nov 24 17:13:30 crc kubenswrapper[4760]: I1124 17:13:30.432358 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-dcdmq" Nov 24 17:13:30 crc kubenswrapper[4760]: I1124 17:13:30.434685 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-xtps2" event={"ID":"bc25f619-5720-41cd-9fe6-beb030debe00","Type":"ContainerStarted","Data":"3d10a9779a199d63f8b4b03eb2173128c4dfb39a10035049f480049e5657ce47"} Nov 24 17:13:30 crc kubenswrapper[4760]: I1124 17:13:30.451204 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-qn2xj" podStartSLOduration=2.3699108779999998 podStartE2EDuration="5.451176736s" podCreationTimestamp="2025-11-24 17:13:25 +0000 UTC" firstStartedPulling="2025-11-24 17:13:26.175817019 +0000 UTC m=+601.498698569" lastFinishedPulling="2025-11-24 17:13:29.257082857 +0000 UTC m=+604.579964427" observedRunningTime="2025-11-24 17:13:30.450681662 +0000 UTC m=+605.773563222" watchObservedRunningTime="2025-11-24 17:13:30.451176736 +0000 UTC m=+605.774058326" Nov 24 17:13:30 crc kubenswrapper[4760]: I1124 17:13:30.473753 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-dcdmq" podStartSLOduration=2.550567472 podStartE2EDuration="5.473731317s" podCreationTimestamp="2025-11-24 17:13:25 +0000 UTC" firstStartedPulling="2025-11-24 17:13:26.385481885 +0000 UTC m=+601.708363435" lastFinishedPulling="2025-11-24 17:13:29.30864572 +0000 UTC m=+604.631527280" observedRunningTime="2025-11-24 17:13:30.4688347 +0000 UTC m=+605.791716280" watchObservedRunningTime="2025-11-24 17:13:30.473731317 +0000 UTC m=+605.796612907" Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.643058 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.643441 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.643515 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.907603 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-xtps2" podStartSLOduration=7.8013733 podStartE2EDuration="10.907585756s" podCreationTimestamp="2025-11-24 17:13:25 +0000 UTC" firstStartedPulling="2025-11-24 17:13:26.143540876 +0000 UTC m=+601.466422426" lastFinishedPulling="2025-11-24 17:13:29.249753322 +0000 UTC m=+604.572634882" observedRunningTime="2025-11-24 17:13:30.494667682 +0000 UTC m=+605.817549262" watchObservedRunningTime="2025-11-24 17:13:35.907585756 +0000 UTC m=+611.230467306" Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.909034 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-dcdmq" Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.909068 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-t55f2"] Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.909834 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovn-controller" containerID="cri-o://d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1" gracePeriod=30 Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.910400 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="northd" containerID="cri-o://28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb" gracePeriod=30 Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.910566 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="sbdb" containerID="cri-o://cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6" gracePeriod=30 Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.910642 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="nbdb" containerID="cri-o://9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599" gracePeriod=30 Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.910750 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="kube-rbac-proxy-node" containerID="cri-o://d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7" gracePeriod=30 Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.910868 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685" gracePeriod=30 Nov 
24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.910978 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovn-acl-logging" containerID="cri-o://70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0" gracePeriod=30 Nov 24 17:13:35 crc kubenswrapper[4760]: I1124 17:13:35.955897 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" containerID="cri-o://6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62" gracePeriod=30 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.246939 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/3.log" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.249592 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovn-acl-logging/0.log" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.250805 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovn-controller/0.log" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.251360 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259519 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-script-lib\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259555 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-openvswitch\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259578 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-netd\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259595 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-netns\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259616 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-kubelet\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259632 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-ovn\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259680 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-ovn-kubernetes\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259701 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-log-socket\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259717 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-var-lib-openvswitch\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259734 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-etc-openvswitch\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259750 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-bin\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259770 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-systemd-units\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259802 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovn-node-metrics-cert\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259824 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-slash\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259838 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259890 4760 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-85wkh\" (UniqueName: \"kubernetes.io/projected/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-kube-api-access-85wkh\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259908 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-config\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259922 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-node-log\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259942 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-env-overrides\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.259964 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-systemd\") pod \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\" (UID: \"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b\") " Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260053 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260164 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260207 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260228 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "host-kubelet". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260260 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260276 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260284 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260296 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-log-socket" (OuterVolumeSpecName: "log-socket") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260334 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260377 4760 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260380 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260389 4760 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260438 4760 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260467 4760 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260494 4760 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-log-socket\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260518 4760 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260541 4760 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260562 4760 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260406 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260419 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260239 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260594 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-node-log" (OuterVolumeSpecName: "node-log") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.260663 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-slash" (OuterVolumeSpecName: "host-slash") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.261079 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.261292 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.265638 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-kube-api-access-85wkh" (OuterVolumeSpecName: "kube-api-access-85wkh") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "kube-api-access-85wkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.267041 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.273458 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" (UID: "a1ccc7f2-1c1b-42b4-aac5-a9865757a92b"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303100 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zmfz2"] Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303370 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303397 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303413 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="kube-rbac-proxy-node" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303424 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="kube-rbac-proxy-node" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303444 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="kube-rbac-proxy-ovn-metrics" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303455 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="kube-rbac-proxy-ovn-metrics" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303469 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="sbdb" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303478 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="sbdb" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303490 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303500 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303512 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovn-acl-logging" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303521 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovn-acl-logging" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303537 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303547 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303562 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="northd" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303572 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="northd" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303588 4760 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovn-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303598 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovn-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303609 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="kubecfg-setup" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303620 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="kubecfg-setup" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.303639 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="nbdb" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303648 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="nbdb" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303825 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovn-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303842 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="northd" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303855 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303868 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovn-acl-logging" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303883 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="kube-rbac-proxy-node" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303896 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303911 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303925 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303937 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303952 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="kube-rbac-proxy-ovn-metrics" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303969 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="nbdb" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.303983 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="sbdb" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.304148 4760 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.304161 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.304179 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.304190 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerName="ovnkube-controller" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.307781 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361097 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-kubelet\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361190 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-run-netns\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361216 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-run-ovn\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361252 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-run-openvswitch\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361282 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-cni-netd\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361366 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-node-log\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361489 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-slash\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361603 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-run-ovn-kubernetes\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361642 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-var-lib-openvswitch\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361670 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/97dff3af-9edf-4f32-a200-4362ca265172-env-overrides\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361703 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-systemd-units\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361737 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-etc-openvswitch\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361755 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-run-systemd\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361831 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfxns\" (UniqueName: \"kubernetes.io/projected/97dff3af-9edf-4f32-a200-4362ca265172-kube-api-access-kfxns\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361928 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/97dff3af-9edf-4f32-a200-4362ca265172-ovnkube-script-lib\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361954 4760 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-log-socket\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.361979 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/97dff3af-9edf-4f32-a200-4362ca265172-ovn-node-metrics-cert\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362033 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/97dff3af-9edf-4f32-a200-4362ca265172-ovnkube-config\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362093 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362199 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-cni-bin\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362315 4760 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362337 4760 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362362 4760 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362387 4760 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362414 4760 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362441 4760 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-slash\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362461 4760 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362484 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85wkh\" (UniqueName: \"kubernetes.io/projected/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-kube-api-access-85wkh\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362505 4760 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362523 4760 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-node-log\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362540 4760 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.362558 4760 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.463599 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.463717 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-cni-bin\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.463768 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-run-ovn\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.463792 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-kubelet\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.463815 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-run-netns\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.463848 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-run-openvswitch\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.463875 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-cni-netd\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.463911 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-node-log\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.463951 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-slash\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.464126 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-slash\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.464830 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-node-log\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.464823 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-run-ovn-kubernetes\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.464886 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-var-lib-openvswitch\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.464884 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-run-ovn-kubernetes\") pod \"ovnkube-node-zmfz2\" (UID: 
\"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.464910 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/97dff3af-9edf-4f32-a200-4362ca265172-env-overrides\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.464956 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-run-ovn\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465045 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-systemd-units\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465079 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-cni-netd\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465096 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-kubelet\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465094 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465113 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-run-netns\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465132 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-var-lib-openvswitch\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465167 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-host-cni-bin\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc 
kubenswrapper[4760]: I1124 17:13:36.465698 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/97dff3af-9edf-4f32-a200-4362ca265172-env-overrides\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465740 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-run-openvswitch\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465764 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-systemd-units\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465859 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-etc-openvswitch\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465882 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-run-systemd\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465909 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfxns\" (UniqueName: \"kubernetes.io/projected/97dff3af-9edf-4f32-a200-4362ca265172-kube-api-access-kfxns\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465946 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/97dff3af-9edf-4f32-a200-4362ca265172-ovnkube-script-lib\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.465982 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-log-socket\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.466034 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/97dff3af-9edf-4f32-a200-4362ca265172-ovn-node-metrics-cert\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.466067 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/97dff3af-9edf-4f32-a200-4362ca265172-ovnkube-config\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.467253 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/97dff3af-9edf-4f32-a200-4362ca265172-ovnkube-config\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.469850 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-etc-openvswitch\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.469885 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-run-systemd\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.470510 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/97dff3af-9edf-4f32-a200-4362ca265172-log-socket\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.472214 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/97dff3af-9edf-4f32-a200-4362ca265172-ovnkube-script-lib\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.482488 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/97dff3af-9edf-4f32-a200-4362ca265172-ovn-node-metrics-cert\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.485129 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovnkube-controller/3.log" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.487643 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfxns\" (UniqueName: \"kubernetes.io/projected/97dff3af-9edf-4f32-a200-4362ca265172-kube-api-access-kfxns\") pod \"ovnkube-node-zmfz2\" (UID: \"97dff3af-9edf-4f32-a200-4362ca265172\") " pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.489268 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovn-acl-logging/0.log" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.489799 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-t55f2_a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/ovn-controller/0.log" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490352 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62" exitCode=0 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490408 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6" exitCode=0 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490428 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599" exitCode=0 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490444 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb" exitCode=0 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490462 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685" exitCode=0 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490481 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7" exitCode=0 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490497 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0" exitCode=143 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490491 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490568 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490592 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490611 4760 scope.go:117] "RemoveContainer" containerID="6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490615 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490770 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" 
event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490802 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490515 4760 generic.go:334] "Generic (PLEG): container finished" podID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" containerID="d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1" exitCode=143 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490822 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490940 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490953 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490966 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490977 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490988 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490998 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.490501 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491030 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491142 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491178 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491210 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491229 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491244 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491261 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491276 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491291 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491303 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491314 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491325 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491336 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491353 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" 
event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491371 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491390 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491404 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491418 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491432 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491446 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491464 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491479 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491493 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491506 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491528 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t55f2" event={"ID":"a1ccc7f2-1c1b-42b4-aac5-a9865757a92b","Type":"ContainerDied","Data":"f0d515499e3c7cabe4e35991e6813204363fa60080f8c63c2d36630e178aa82f"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491551 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491564 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491575 4760 pod_container_deletor.go:114] 
"Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491587 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491597 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491611 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491625 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491639 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491692 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.491706 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.494391 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/2.log" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.496178 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/1.log" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.496321 4760 generic.go:334] "Generic (PLEG): container finished" podID="ea01e72c-3c1c-465f-a4cb-90eb34c2f871" containerID="70516c5f47799f0ece36f692634fe011328322a1cc75c42e9af99e7a48eceacc" exitCode=2 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.496589 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8x59s" event={"ID":"ea01e72c-3c1c-465f-a4cb-90eb34c2f871","Type":"ContainerDied","Data":"70516c5f47799f0ece36f692634fe011328322a1cc75c42e9af99e7a48eceacc"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.496669 4760 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea"} Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.497513 4760 scope.go:117] "RemoveContainer" containerID="70516c5f47799f0ece36f692634fe011328322a1cc75c42e9af99e7a48eceacc" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.497896 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed 
container=kube-multus pod=multus-8x59s_openshift-multus(ea01e72c-3c1c-465f-a4cb-90eb34c2f871)\"" pod="openshift-multus/multus-8x59s" podUID="ea01e72c-3c1c-465f-a4cb-90eb34c2f871" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.497652 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"28aa4a21b3828caf19f86ef80042f17ff82b2d6bbe8b627e35198893af6325e3"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.498568 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://28aa4a21b3828caf19f86ef80042f17ff82b2d6bbe8b627e35198893af6325e3" gracePeriod=600 Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.528706 4760 scope.go:117] "RemoveContainer" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.562344 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-t55f2"] Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.568072 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-t55f2"] Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.582352 4760 scope.go:117] "RemoveContainer" containerID="cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.598913 4760 scope.go:117] "RemoveContainer" containerID="9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.619252 4760 scope.go:117] "RemoveContainer" containerID="28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.621483 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.641588 4760 scope.go:117] "RemoveContainer" containerID="dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.685224 4760 scope.go:117] "RemoveContainer" containerID="d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.709201 4760 scope.go:117] "RemoveContainer" containerID="70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.751953 4760 scope.go:117] "RemoveContainer" containerID="d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.770748 4760 scope.go:117] "RemoveContainer" containerID="6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.787219 4760 scope.go:117] "RemoveContainer" containerID="6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.787545 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": container with ID starting with 6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62 not found: ID does not exist" containerID="6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.787580 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62"} err="failed to get container status \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": rpc error: code = NotFound desc = could not find container \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": container with ID starting with 6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.787607 4760 scope.go:117] "RemoveContainer" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.787887 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\": container with ID starting with 2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea not found: ID does not exist" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.787940 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea"} err="failed to get container status \"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\": rpc error: code = NotFound desc = could not find container \"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\": container with ID starting with 2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.787978 4760 scope.go:117] "RemoveContainer" 
containerID="cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.788460 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\": container with ID starting with cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6 not found: ID does not exist" containerID="cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.788500 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6"} err="failed to get container status \"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\": rpc error: code = NotFound desc = could not find container \"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\": container with ID starting with cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.788526 4760 scope.go:117] "RemoveContainer" containerID="9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.788908 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\": container with ID starting with 9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599 not found: ID does not exist" containerID="9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.788981 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599"} err="failed to get container status \"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\": rpc error: code = NotFound desc = could not find container \"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\": container with ID starting with 9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.789034 4760 scope.go:117] "RemoveContainer" containerID="28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.789385 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\": container with ID starting with 28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb not found: ID does not exist" containerID="28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.789424 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb"} err="failed to get container status \"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\": rpc error: code = NotFound desc = could not find container \"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\": container with ID starting with 
28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.789446 4760 scope.go:117] "RemoveContainer" containerID="dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.789816 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\": container with ID starting with dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685 not found: ID does not exist" containerID="dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.789872 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685"} err="failed to get container status \"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\": rpc error: code = NotFound desc = could not find container \"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\": container with ID starting with dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.789911 4760 scope.go:117] "RemoveContainer" containerID="d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.790279 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\": container with ID starting with d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7 not found: ID does not exist" containerID="d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.790345 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7"} err="failed to get container status \"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\": rpc error: code = NotFound desc = could not find container \"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\": container with ID starting with d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.790385 4760 scope.go:117] "RemoveContainer" containerID="70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.790945 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\": container with ID starting with 70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0 not found: ID does not exist" containerID="70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.790967 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0"} err="failed to get container status \"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\": rpc 
error: code = NotFound desc = could not find container \"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\": container with ID starting with 70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.790988 4760 scope.go:117] "RemoveContainer" containerID="d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.791510 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\": container with ID starting with d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1 not found: ID does not exist" containerID="d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.791582 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1"} err="failed to get container status \"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\": rpc error: code = NotFound desc = could not find container \"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\": container with ID starting with d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.791628 4760 scope.go:117] "RemoveContainer" containerID="6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4" Nov 24 17:13:36 crc kubenswrapper[4760]: E1124 17:13:36.792038 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\": container with ID starting with 6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4 not found: ID does not exist" containerID="6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.792100 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4"} err="failed to get container status \"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\": rpc error: code = NotFound desc = could not find container \"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\": container with ID starting with 6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.792143 4760 scope.go:117] "RemoveContainer" containerID="6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.792487 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62"} err="failed to get container status \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": rpc error: code = NotFound desc = could not find container \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": container with ID starting with 6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 
17:13:36.792551 4760 scope.go:117] "RemoveContainer" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.793148 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea"} err="failed to get container status \"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\": rpc error: code = NotFound desc = could not find container \"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\": container with ID starting with 2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.793179 4760 scope.go:117] "RemoveContainer" containerID="cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.793772 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6"} err="failed to get container status \"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\": rpc error: code = NotFound desc = could not find container \"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\": container with ID starting with cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.793832 4760 scope.go:117] "RemoveContainer" containerID="9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.794150 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599"} err="failed to get container status \"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\": rpc error: code = NotFound desc = could not find container \"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\": container with ID starting with 9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.794175 4760 scope.go:117] "RemoveContainer" containerID="28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.794667 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb"} err="failed to get container status \"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\": rpc error: code = NotFound desc = could not find container \"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\": container with ID starting with 28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.794689 4760 scope.go:117] "RemoveContainer" containerID="dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.794991 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685"} err="failed to get container status 
\"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\": rpc error: code = NotFound desc = could not find container \"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\": container with ID starting with dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.795079 4760 scope.go:117] "RemoveContainer" containerID="d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.795419 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7"} err="failed to get container status \"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\": rpc error: code = NotFound desc = could not find container \"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\": container with ID starting with d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.795444 4760 scope.go:117] "RemoveContainer" containerID="70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.795860 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0"} err="failed to get container status \"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\": rpc error: code = NotFound desc = could not find container \"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\": container with ID starting with 70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.795895 4760 scope.go:117] "RemoveContainer" containerID="d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.796185 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1"} err="failed to get container status \"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\": rpc error: code = NotFound desc = could not find container \"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\": container with ID starting with d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.796208 4760 scope.go:117] "RemoveContainer" containerID="6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.796465 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4"} err="failed to get container status \"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\": rpc error: code = NotFound desc = could not find container \"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\": container with ID starting with 6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.796492 4760 scope.go:117] "RemoveContainer" 
containerID="6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.796768 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62"} err="failed to get container status \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": rpc error: code = NotFound desc = could not find container \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": container with ID starting with 6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.796789 4760 scope.go:117] "RemoveContainer" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.797238 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea"} err="failed to get container status \"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\": rpc error: code = NotFound desc = could not find container \"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\": container with ID starting with 2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.797274 4760 scope.go:117] "RemoveContainer" containerID="cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.797573 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6"} err="failed to get container status \"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\": rpc error: code = NotFound desc = could not find container \"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\": container with ID starting with cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.797596 4760 scope.go:117] "RemoveContainer" containerID="9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.798039 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599"} err="failed to get container status \"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\": rpc error: code = NotFound desc = could not find container \"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\": container with ID starting with 9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.798078 4760 scope.go:117] "RemoveContainer" containerID="28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.798368 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb"} err="failed to get container status \"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\": rpc error: code = NotFound desc = could not find 
container \"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\": container with ID starting with 28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.798400 4760 scope.go:117] "RemoveContainer" containerID="dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.799405 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685"} err="failed to get container status \"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\": rpc error: code = NotFound desc = could not find container \"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\": container with ID starting with dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.799432 4760 scope.go:117] "RemoveContainer" containerID="d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.799901 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7"} err="failed to get container status \"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\": rpc error: code = NotFound desc = could not find container \"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\": container with ID starting with d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.799941 4760 scope.go:117] "RemoveContainer" containerID="70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.800344 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0"} err="failed to get container status \"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\": rpc error: code = NotFound desc = could not find container \"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\": container with ID starting with 70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.800369 4760 scope.go:117] "RemoveContainer" containerID="d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.801152 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1"} err="failed to get container status \"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\": rpc error: code = NotFound desc = could not find container \"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\": container with ID starting with d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.801191 4760 scope.go:117] "RemoveContainer" containerID="6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.801492 4760 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4"} err="failed to get container status \"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\": rpc error: code = NotFound desc = could not find container \"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\": container with ID starting with 6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.801517 4760 scope.go:117] "RemoveContainer" containerID="6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.801839 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62"} err="failed to get container status \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": rpc error: code = NotFound desc = could not find container \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": container with ID starting with 6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.801861 4760 scope.go:117] "RemoveContainer" containerID="2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.802218 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea"} err="failed to get container status \"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\": rpc error: code = NotFound desc = could not find container \"2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea\": container with ID starting with 2f7ad5866a3cbf8f8d59820fe4e162d0c0cf2836e39e360c843d9660357a93ea not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.802280 4760 scope.go:117] "RemoveContainer" containerID="cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.802963 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6"} err="failed to get container status \"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\": rpc error: code = NotFound desc = could not find container \"cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6\": container with ID starting with cd2fe45d84e0f71d9b88240b3ab74520b85a019a592d0e107d33bb268af74dd6 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.802983 4760 scope.go:117] "RemoveContainer" containerID="9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.803918 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599"} err="failed to get container status \"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\": rpc error: code = NotFound desc = could not find container \"9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599\": container with ID starting with 
9cf264f82f68b1ec7a9cacec1bd8c0e6016db16198d2e3e0b8b6bfdd674fa599 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.803947 4760 scope.go:117] "RemoveContainer" containerID="28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.804235 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb"} err="failed to get container status \"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\": rpc error: code = NotFound desc = could not find container \"28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb\": container with ID starting with 28173b09f6de0598a5784c02e5c4c343a472c0bf04da814f8d04d8f63e13b2bb not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.804271 4760 scope.go:117] "RemoveContainer" containerID="dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.804693 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685"} err="failed to get container status \"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\": rpc error: code = NotFound desc = could not find container \"dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685\": container with ID starting with dbd99e9f7e49827036051a2d0183cb01860e7b221522cb434295a08af31dd685 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.804721 4760 scope.go:117] "RemoveContainer" containerID="d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.805233 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7"} err="failed to get container status \"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\": rpc error: code = NotFound desc = could not find container \"d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7\": container with ID starting with d67a9695a96af23a36709731bb2310b2dacf42de212b164cf61cb6b2b9c4a4c7 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.805254 4760 scope.go:117] "RemoveContainer" containerID="70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.805697 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0"} err="failed to get container status \"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\": rpc error: code = NotFound desc = could not find container \"70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0\": container with ID starting with 70f14f362d8a913cbcdebed7cec5ef8121e00edbb4e9e5d19fc464082a33bee0 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.805721 4760 scope.go:117] "RemoveContainer" containerID="d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.805980 4760 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1"} err="failed to get container status \"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\": rpc error: code = NotFound desc = could not find container \"d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1\": container with ID starting with d87fcc21659da690b918a195af5a5f169b13cdebc96f4e4bfa4b419a407d9eb1 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.806017 4760 scope.go:117] "RemoveContainer" containerID="6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.806482 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4"} err="failed to get container status \"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\": rpc error: code = NotFound desc = could not find container \"6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4\": container with ID starting with 6f88d829ab9f413886e97412052cdf733166d5f0b9815aa0a74cd94ddb6defd4 not found: ID does not exist" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.806505 4760 scope.go:117] "RemoveContainer" containerID="6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62" Nov 24 17:13:36 crc kubenswrapper[4760]: I1124 17:13:36.806749 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62"} err="failed to get container status \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": rpc error: code = NotFound desc = could not find container \"6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62\": container with ID starting with 6e233a2adb957f4a4b8132e724c47ab288784f7face013308b1b53355633fe62 not found: ID does not exist" Nov 24 17:13:37 crc kubenswrapper[4760]: I1124 17:13:37.477220 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1ccc7f2-1c1b-42b4-aac5-a9865757a92b" path="/var/lib/kubelet/pods/a1ccc7f2-1c1b-42b4-aac5-a9865757a92b/volumes" Nov 24 17:13:37 crc kubenswrapper[4760]: I1124 17:13:37.504802 4760 generic.go:334] "Generic (PLEG): container finished" podID="97dff3af-9edf-4f32-a200-4362ca265172" containerID="097e2af920409f1dfc919708ad56b9b469d399e34e3278012010d99784410c9d" exitCode=0 Nov 24 17:13:37 crc kubenswrapper[4760]: I1124 17:13:37.504864 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" event={"ID":"97dff3af-9edf-4f32-a200-4362ca265172","Type":"ContainerDied","Data":"097e2af920409f1dfc919708ad56b9b469d399e34e3278012010d99784410c9d"} Nov 24 17:13:37 crc kubenswrapper[4760]: I1124 17:13:37.504965 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" event={"ID":"97dff3af-9edf-4f32-a200-4362ca265172","Type":"ContainerStarted","Data":"31b9cc82f1e183e3af3cc91756d35c4a300ca05fca86e94e52050fe87920593a"} Nov 24 17:13:37 crc kubenswrapper[4760]: I1124 17:13:37.508508 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="28aa4a21b3828caf19f86ef80042f17ff82b2d6bbe8b627e35198893af6325e3" exitCode=0 Nov 24 17:13:37 crc kubenswrapper[4760]: I1124 17:13:37.508542 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"28aa4a21b3828caf19f86ef80042f17ff82b2d6bbe8b627e35198893af6325e3"} Nov 24 17:13:37 crc kubenswrapper[4760]: I1124 17:13:37.508572 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"82b000a4d02003c883bf71c824299533ea1c6d3009389b2511a2787ceedc0656"} Nov 24 17:13:37 crc kubenswrapper[4760]: I1124 17:13:37.508592 4760 scope.go:117] "RemoveContainer" containerID="c890ab30decc89bd18031b40b32e3fbedd7cc15c8392d95c3f21ddab1b02a8fb" Nov 24 17:13:38 crc kubenswrapper[4760]: I1124 17:13:38.525090 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" event={"ID":"97dff3af-9edf-4f32-a200-4362ca265172","Type":"ContainerStarted","Data":"d8a24e4c862c69cd81f5cef03f4a678a87eccc5acff6da77226fcc51995303e5"} Nov 24 17:13:38 crc kubenswrapper[4760]: I1124 17:13:38.525819 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" event={"ID":"97dff3af-9edf-4f32-a200-4362ca265172","Type":"ContainerStarted","Data":"2dc96827b6548d8281ee3fe0394b5d9a656be0b174e72bf7432878c83055847c"} Nov 24 17:13:38 crc kubenswrapper[4760]: I1124 17:13:38.525838 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" event={"ID":"97dff3af-9edf-4f32-a200-4362ca265172","Type":"ContainerStarted","Data":"3688841e6a5033a0ff93538011fadc62c9c9968c47f0cfe7db9124b57f1ab7e1"} Nov 24 17:13:38 crc kubenswrapper[4760]: I1124 17:13:38.525878 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" event={"ID":"97dff3af-9edf-4f32-a200-4362ca265172","Type":"ContainerStarted","Data":"43570a41ef81307b54c5335286bc1b8c693acbfb74e08dec4aa7f3439988bbc4"} Nov 24 17:13:38 crc kubenswrapper[4760]: I1124 17:13:38.525891 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" event={"ID":"97dff3af-9edf-4f32-a200-4362ca265172","Type":"ContainerStarted","Data":"fc3994233f48ad8460ff2a98bd4b2f80dd07a6a0e88875fee8b82d2bd4774389"} Nov 24 17:13:38 crc kubenswrapper[4760]: I1124 17:13:38.525905 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" event={"ID":"97dff3af-9edf-4f32-a200-4362ca265172","Type":"ContainerStarted","Data":"39bfa1e899de0948cc9eb8a86fc7edd17320a899ae5a448cd319414746a5a790"} Nov 24 17:13:41 crc kubenswrapper[4760]: I1124 17:13:41.556449 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" event={"ID":"97dff3af-9edf-4f32-a200-4362ca265172","Type":"ContainerStarted","Data":"1a10a68289ffcf2d782f331628bca0a862a724fc77635110b883dcead7ef9bf1"} Nov 24 17:13:43 crc kubenswrapper[4760]: I1124 17:13:43.573147 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" event={"ID":"97dff3af-9edf-4f32-a200-4362ca265172","Type":"ContainerStarted","Data":"8f3613507abac901052858dc4a3e04a1abd36ae90fdff0ef35a5d72803eea6c4"} Nov 24 17:13:43 crc kubenswrapper[4760]: I1124 17:13:43.573511 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:43 crc kubenswrapper[4760]: I1124 17:13:43.573652 4760 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:43 crc kubenswrapper[4760]: I1124 17:13:43.573693 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:43 crc kubenswrapper[4760]: I1124 17:13:43.612635 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" podStartSLOduration=7.612611921 podStartE2EDuration="7.612611921s" podCreationTimestamp="2025-11-24 17:13:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:13:43.610652556 +0000 UTC m=+618.933534146" watchObservedRunningTime="2025-11-24 17:13:43.612611921 +0000 UTC m=+618.935493511" Nov 24 17:13:43 crc kubenswrapper[4760]: I1124 17:13:43.618733 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:43 crc kubenswrapper[4760]: I1124 17:13:43.618895 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:13:48 crc kubenswrapper[4760]: I1124 17:13:48.466448 4760 scope.go:117] "RemoveContainer" containerID="70516c5f47799f0ece36f692634fe011328322a1cc75c42e9af99e7a48eceacc" Nov 24 17:13:48 crc kubenswrapper[4760]: E1124 17:13:48.467302 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-8x59s_openshift-multus(ea01e72c-3c1c-465f-a4cb-90eb34c2f871)\"" pod="openshift-multus/multus-8x59s" podUID="ea01e72c-3c1c-465f-a4cb-90eb34c2f871" Nov 24 17:13:59 crc kubenswrapper[4760]: I1124 17:13:59.467871 4760 scope.go:117] "RemoveContainer" containerID="70516c5f47799f0ece36f692634fe011328322a1cc75c42e9af99e7a48eceacc" Nov 24 17:14:00 crc kubenswrapper[4760]: I1124 17:14:00.705475 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/2.log" Nov 24 17:14:00 crc kubenswrapper[4760]: I1124 17:14:00.708067 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/1.log" Nov 24 17:14:00 crc kubenswrapper[4760]: I1124 17:14:00.708143 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8x59s" event={"ID":"ea01e72c-3c1c-465f-a4cb-90eb34c2f871","Type":"ContainerStarted","Data":"6091930c0d67fdbf60f5dbd1c589a6a3ac68f502dc043a3af26bdec984108978"} Nov 24 17:14:06 crc kubenswrapper[4760]: I1124 17:14:06.655447 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zmfz2" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.382690 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn"] Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.385125 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.387675 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.395895 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn"] Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.400109 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvqgs\" (UniqueName: \"kubernetes.io/projected/0f244ccb-ab02-43bc-8cd5-645c33d953b9-kube-api-access-fvqgs\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.400149 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.400181 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.501912 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvqgs\" (UniqueName: \"kubernetes.io/projected/0f244ccb-ab02-43bc-8cd5-645c33d953b9-kube-api-access-fvqgs\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.502033 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.502098 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.502968 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.503111 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.533322 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvqgs\" (UniqueName: \"kubernetes.io/projected/0f244ccb-ab02-43bc-8cd5-645c33d953b9-kube-api-access-fvqgs\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.701247 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:20 crc kubenswrapper[4760]: I1124 17:14:20.963518 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn"] Nov 24 17:14:20 crc kubenswrapper[4760]: W1124 17:14:20.976407 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f244ccb_ab02_43bc_8cd5_645c33d953b9.slice/crio-42cfe0e4a0966588c4b6f9d6fcce4e1ae2ccc0408a28cba62f2312f42801449a WatchSource:0}: Error finding container 42cfe0e4a0966588c4b6f9d6fcce4e1ae2ccc0408a28cba62f2312f42801449a: Status 404 returned error can't find the container with id 42cfe0e4a0966588c4b6f9d6fcce4e1ae2ccc0408a28cba62f2312f42801449a Nov 24 17:14:21 crc kubenswrapper[4760]: I1124 17:14:21.856599 4760 generic.go:334] "Generic (PLEG): container finished" podID="0f244ccb-ab02-43bc-8cd5-645c33d953b9" containerID="af6c916b2018c9c1da1e3ec3aec95800ad0dff11f4721fea8a6c5b3767e30b26" exitCode=0 Nov 24 17:14:21 crc kubenswrapper[4760]: I1124 17:14:21.856706 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" event={"ID":"0f244ccb-ab02-43bc-8cd5-645c33d953b9","Type":"ContainerDied","Data":"af6c916b2018c9c1da1e3ec3aec95800ad0dff11f4721fea8a6c5b3767e30b26"} Nov 24 17:14:21 crc kubenswrapper[4760]: I1124 17:14:21.860369 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" event={"ID":"0f244ccb-ab02-43bc-8cd5-645c33d953b9","Type":"ContainerStarted","Data":"42cfe0e4a0966588c4b6f9d6fcce4e1ae2ccc0408a28cba62f2312f42801449a"} Nov 24 17:14:23 crc kubenswrapper[4760]: I1124 17:14:23.873774 4760 generic.go:334] "Generic (PLEG): container finished" podID="0f244ccb-ab02-43bc-8cd5-645c33d953b9" containerID="b941242065a51b91a26a62ab99e822127e45acd6d78761dd47f410e024534f24" exitCode=0 Nov 24 17:14:23 crc kubenswrapper[4760]: I1124 17:14:23.873827 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" event={"ID":"0f244ccb-ab02-43bc-8cd5-645c33d953b9","Type":"ContainerDied","Data":"b941242065a51b91a26a62ab99e822127e45acd6d78761dd47f410e024534f24"} Nov 24 17:14:24 crc kubenswrapper[4760]: I1124 17:14:24.885338 4760 generic.go:334] "Generic (PLEG): container finished" podID="0f244ccb-ab02-43bc-8cd5-645c33d953b9" containerID="ccad27274b7800abab9407d3dbf71ee06795f12dd33d30131731d12773808011" exitCode=0 Nov 24 17:14:24 crc kubenswrapper[4760]: I1124 17:14:24.885516 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" event={"ID":"0f244ccb-ab02-43bc-8cd5-645c33d953b9","Type":"ContainerDied","Data":"ccad27274b7800abab9407d3dbf71ee06795f12dd33d30131731d12773808011"} Nov 24 17:14:25 crc kubenswrapper[4760]: I1124 17:14:25.701989 4760 scope.go:117] "RemoveContainer" containerID="00f08704723fedb79aee00deeba7d808bb28deb7d4a3a354060e22e118c980ea" Nov 24 17:14:25 crc kubenswrapper[4760]: I1124 17:14:25.896072 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8x59s_ea01e72c-3c1c-465f-a4cb-90eb34c2f871/kube-multus/2.log" Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.197293 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.374609 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvqgs\" (UniqueName: \"kubernetes.io/projected/0f244ccb-ab02-43bc-8cd5-645c33d953b9-kube-api-access-fvqgs\") pod \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.374700 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-bundle\") pod \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.374766 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-util\") pod \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\" (UID: \"0f244ccb-ab02-43bc-8cd5-645c33d953b9\") " Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.375891 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-bundle" (OuterVolumeSpecName: "bundle") pod "0f244ccb-ab02-43bc-8cd5-645c33d953b9" (UID: "0f244ccb-ab02-43bc-8cd5-645c33d953b9"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.383016 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f244ccb-ab02-43bc-8cd5-645c33d953b9-kube-api-access-fvqgs" (OuterVolumeSpecName: "kube-api-access-fvqgs") pod "0f244ccb-ab02-43bc-8cd5-645c33d953b9" (UID: "0f244ccb-ab02-43bc-8cd5-645c33d953b9"). InnerVolumeSpecName "kube-api-access-fvqgs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.476130 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvqgs\" (UniqueName: \"kubernetes.io/projected/0f244ccb-ab02-43bc-8cd5-645c33d953b9-kube-api-access-fvqgs\") on node \"crc\" DevicePath \"\"" Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.476186 4760 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.521005 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-util" (OuterVolumeSpecName: "util") pod "0f244ccb-ab02-43bc-8cd5-645c33d953b9" (UID: "0f244ccb-ab02-43bc-8cd5-645c33d953b9"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.578004 4760 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0f244ccb-ab02-43bc-8cd5-645c33d953b9-util\") on node \"crc\" DevicePath \"\"" Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.907468 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" event={"ID":"0f244ccb-ab02-43bc-8cd5-645c33d953b9","Type":"ContainerDied","Data":"42cfe0e4a0966588c4b6f9d6fcce4e1ae2ccc0408a28cba62f2312f42801449a"} Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.907914 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42cfe0e4a0966588c4b6f9d6fcce4e1ae2ccc0408a28cba62f2312f42801449a" Nov 24 17:14:26 crc kubenswrapper[4760]: I1124 17:14:26.907544 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.230513 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-zn9lc"] Nov 24 17:14:29 crc kubenswrapper[4760]: E1124 17:14:29.230744 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f244ccb-ab02-43bc-8cd5-645c33d953b9" containerName="pull" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.230758 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f244ccb-ab02-43bc-8cd5-645c33d953b9" containerName="pull" Nov 24 17:14:29 crc kubenswrapper[4760]: E1124 17:14:29.230770 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f244ccb-ab02-43bc-8cd5-645c33d953b9" containerName="extract" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.230777 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f244ccb-ab02-43bc-8cd5-645c33d953b9" containerName="extract" Nov 24 17:14:29 crc kubenswrapper[4760]: E1124 17:14:29.230790 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f244ccb-ab02-43bc-8cd5-645c33d953b9" containerName="util" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.230798 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f244ccb-ab02-43bc-8cd5-645c33d953b9" containerName="util" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.230918 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f244ccb-ab02-43bc-8cd5-645c33d953b9" containerName="extract" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.231292 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-zn9lc" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.233558 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-7dbmq" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.234716 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.241256 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.254786 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-zn9lc"] Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.414341 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmp7z\" (UniqueName: \"kubernetes.io/projected/60a6fd21-1e4a-4eab-940a-157de6e7236e-kube-api-access-rmp7z\") pod \"nmstate-operator-557fdffb88-zn9lc\" (UID: \"60a6fd21-1e4a-4eab-940a-157de6e7236e\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-zn9lc" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.515314 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmp7z\" (UniqueName: \"kubernetes.io/projected/60a6fd21-1e4a-4eab-940a-157de6e7236e-kube-api-access-rmp7z\") pod \"nmstate-operator-557fdffb88-zn9lc\" (UID: \"60a6fd21-1e4a-4eab-940a-157de6e7236e\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-zn9lc" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.545238 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmp7z\" 
(UniqueName: \"kubernetes.io/projected/60a6fd21-1e4a-4eab-940a-157de6e7236e-kube-api-access-rmp7z\") pod \"nmstate-operator-557fdffb88-zn9lc\" (UID: \"60a6fd21-1e4a-4eab-940a-157de6e7236e\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-zn9lc" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.547710 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-zn9lc" Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.828843 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-zn9lc"] Nov 24 17:14:29 crc kubenswrapper[4760]: I1124 17:14:29.929528 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-zn9lc" event={"ID":"60a6fd21-1e4a-4eab-940a-157de6e7236e","Type":"ContainerStarted","Data":"49dfc6f26499d028dc6d81b1daf7eabcb486a997fc2fdcb363a76c720d6c1431"} Nov 24 17:14:31 crc kubenswrapper[4760]: I1124 17:14:31.943546 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-zn9lc" event={"ID":"60a6fd21-1e4a-4eab-940a-157de6e7236e","Type":"ContainerStarted","Data":"8f17ade9de0b1358b157c89136e9d025635bb1257cd9d8cc3aa4c59d3a404e49"} Nov 24 17:14:31 crc kubenswrapper[4760]: I1124 17:14:31.963682 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-zn9lc" podStartSLOduration=1.017173066 podStartE2EDuration="2.963664083s" podCreationTimestamp="2025-11-24 17:14:29 +0000 UTC" firstStartedPulling="2025-11-24 17:14:29.839918993 +0000 UTC m=+665.162800543" lastFinishedPulling="2025-11-24 17:14:31.78641001 +0000 UTC m=+667.109291560" observedRunningTime="2025-11-24 17:14:31.962555112 +0000 UTC m=+667.285436662" watchObservedRunningTime="2025-11-24 17:14:31.963664083 +0000 UTC m=+667.286545633" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.519211 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2"] Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.520698 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.523090 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-hkpbm" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.542405 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z"] Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.543235 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.546599 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.557637 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z"] Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.575534 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-nl86x"] Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.576452 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.638142 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2"] Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.669550 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/2a8eb5c7-a2fa-4029-9d10-9ef82f358506-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-dgm2z\" (UID: \"2a8eb5c7-a2fa-4029-9d10-9ef82f358506\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.669606 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmfj9\" (UniqueName: \"kubernetes.io/projected/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-kube-api-access-zmfj9\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.669631 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrcj8\" (UniqueName: \"kubernetes.io/projected/2a8eb5c7-a2fa-4029-9d10-9ef82f358506-kube-api-access-lrcj8\") pod \"nmstate-webhook-6b89b748d8-dgm2z\" (UID: \"2a8eb5c7-a2fa-4029-9d10-9ef82f358506\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.669668 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-nmstate-lock\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.669686 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-ovs-socket\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.669792 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-dbus-socket\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.669879 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f69sx\" (UniqueName: \"kubernetes.io/projected/c02c0ef6-ec2f-4554-89d5-95ccd5a9af05-kube-api-access-f69sx\") pod \"nmstate-metrics-5dcf9c57c5-tgfj2\" (UID: \"c02c0ef6-ec2f-4554-89d5-95ccd5a9af05\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.689957 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7"] Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.690625 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.691937 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.692828 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-vp67c" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.692842 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.697252 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7"] Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.770612 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5228db69-23c1-48fa-a89f-a4e0459bcdec-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-w6tj7\" (UID: \"5228db69-23c1-48fa-a89f-a4e0459bcdec\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.770713 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-nmstate-lock\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.770748 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-ovs-socket\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.770789 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxb8m\" (UniqueName: \"kubernetes.io/projected/5228db69-23c1-48fa-a89f-a4e0459bcdec-kube-api-access-cxb8m\") pod \"nmstate-console-plugin-5874bd7bc5-w6tj7\" (UID: \"5228db69-23c1-48fa-a89f-a4e0459bcdec\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.770829 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-dbus-socket\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.770884 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f69sx\" (UniqueName: \"kubernetes.io/projected/c02c0ef6-ec2f-4554-89d5-95ccd5a9af05-kube-api-access-f69sx\") pod \"nmstate-metrics-5dcf9c57c5-tgfj2\" (UID: \"c02c0ef6-ec2f-4554-89d5-95ccd5a9af05\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.770935 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/2a8eb5c7-a2fa-4029-9d10-9ef82f358506-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-dgm2z\" (UID: 
\"2a8eb5c7-a2fa-4029-9d10-9ef82f358506\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.770968 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5228db69-23c1-48fa-a89f-a4e0459bcdec-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-w6tj7\" (UID: \"5228db69-23c1-48fa-a89f-a4e0459bcdec\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.771056 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmfj9\" (UniqueName: \"kubernetes.io/projected/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-kube-api-access-zmfj9\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.771129 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrcj8\" (UniqueName: \"kubernetes.io/projected/2a8eb5c7-a2fa-4029-9d10-9ef82f358506-kube-api-access-lrcj8\") pod \"nmstate-webhook-6b89b748d8-dgm2z\" (UID: \"2a8eb5c7-a2fa-4029-9d10-9ef82f358506\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.771617 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-nmstate-lock\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.771690 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-ovs-socket\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.771945 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-dbus-socket\") pod \"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.778689 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/2a8eb5c7-a2fa-4029-9d10-9ef82f358506-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-dgm2z\" (UID: \"2a8eb5c7-a2fa-4029-9d10-9ef82f358506\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.786970 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrcj8\" (UniqueName: \"kubernetes.io/projected/2a8eb5c7-a2fa-4029-9d10-9ef82f358506-kube-api-access-lrcj8\") pod \"nmstate-webhook-6b89b748d8-dgm2z\" (UID: \"2a8eb5c7-a2fa-4029-9d10-9ef82f358506\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.790609 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmfj9\" (UniqueName: \"kubernetes.io/projected/fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3-kube-api-access-zmfj9\") pod 
\"nmstate-handler-nl86x\" (UID: \"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3\") " pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.797804 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f69sx\" (UniqueName: \"kubernetes.io/projected/c02c0ef6-ec2f-4554-89d5-95ccd5a9af05-kube-api-access-f69sx\") pod \"nmstate-metrics-5dcf9c57c5-tgfj2\" (UID: \"c02c0ef6-ec2f-4554-89d5-95ccd5a9af05\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.838820 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.859319 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.869386 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6866949b48-d9pbn"] Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.870155 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.875232 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5228db69-23c1-48fa-a89f-a4e0459bcdec-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-w6tj7\" (UID: \"5228db69-23c1-48fa-a89f-a4e0459bcdec\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.875287 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5228db69-23c1-48fa-a89f-a4e0459bcdec-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-w6tj7\" (UID: \"5228db69-23c1-48fa-a89f-a4e0459bcdec\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.875324 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxb8m\" (UniqueName: \"kubernetes.io/projected/5228db69-23c1-48fa-a89f-a4e0459bcdec-kube-api-access-cxb8m\") pod \"nmstate-console-plugin-5874bd7bc5-w6tj7\" (UID: \"5228db69-23c1-48fa-a89f-a4e0459bcdec\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.876693 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5228db69-23c1-48fa-a89f-a4e0459bcdec-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-w6tj7\" (UID: \"5228db69-23c1-48fa-a89f-a4e0459bcdec\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.883666 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6866949b48-d9pbn"] Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.900641 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxb8m\" (UniqueName: \"kubernetes.io/projected/5228db69-23c1-48fa-a89f-a4e0459bcdec-kube-api-access-cxb8m\") pod \"nmstate-console-plugin-5874bd7bc5-w6tj7\" (UID: \"5228db69-23c1-48fa-a89f-a4e0459bcdec\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 
17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.900962 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.915835 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5228db69-23c1-48fa-a89f-a4e0459bcdec-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-w6tj7\" (UID: \"5228db69-23c1-48fa-a89f-a4e0459bcdec\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 17:14:40 crc kubenswrapper[4760]: W1124 17:14:40.937591 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe888a38_6db0_4cc7_b0e6_7eeb5ecbd7a3.slice/crio-6a1530076633f7a879f6904d7696865ee27697c986da1ce5383eb3774aefacea WatchSource:0}: Error finding container 6a1530076633f7a879f6904d7696865ee27697c986da1ce5383eb3774aefacea: Status 404 returned error can't find the container with id 6a1530076633f7a879f6904d7696865ee27697c986da1ce5383eb3774aefacea Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.977163 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6zr8\" (UniqueName: \"kubernetes.io/projected/b2b8d44b-f031-456a-981a-b07204625a4b-kube-api-access-c6zr8\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.977368 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b2b8d44b-f031-456a-981a-b07204625a4b-console-serving-cert\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.977388 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-oauth-serving-cert\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.977545 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-service-ca\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.977584 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-console-config\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.977607 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-trusted-ca-bundle\") pod \"console-6866949b48-d9pbn\" (UID: 
\"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:40 crc kubenswrapper[4760]: I1124 17:14:40.977722 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b2b8d44b-f031-456a-981a-b07204625a4b-console-oauth-config\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.001224 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-nl86x" event={"ID":"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3","Type":"ContainerStarted","Data":"6a1530076633f7a879f6904d7696865ee27697c986da1ce5383eb3774aefacea"} Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.005793 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.079319 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b2b8d44b-f031-456a-981a-b07204625a4b-console-serving-cert\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.079361 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-oauth-serving-cert\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.079385 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-service-ca\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.079405 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-console-config\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.079426 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-trusted-ca-bundle\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.079450 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b2b8d44b-f031-456a-981a-b07204625a4b-console-oauth-config\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.079471 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-c6zr8\" (UniqueName: \"kubernetes.io/projected/b2b8d44b-f031-456a-981a-b07204625a4b-kube-api-access-c6zr8\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.080499 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-oauth-serving-cert\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.081020 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-service-ca\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.081043 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-console-config\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.081617 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b2b8d44b-f031-456a-981a-b07204625a4b-trusted-ca-bundle\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.083993 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b2b8d44b-f031-456a-981a-b07204625a4b-console-serving-cert\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.084813 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b2b8d44b-f031-456a-981a-b07204625a4b-console-oauth-config\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.092417 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z"] Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.097838 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6zr8\" (UniqueName: \"kubernetes.io/projected/b2b8d44b-f031-456a-981a-b07204625a4b-kube-api-access-c6zr8\") pod \"console-6866949b48-d9pbn\" (UID: \"b2b8d44b-f031-456a-981a-b07204625a4b\") " pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.187883 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7"] Nov 24 17:14:41 crc kubenswrapper[4760]: W1124 17:14:41.192073 4760 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5228db69_23c1_48fa_a89f_a4e0459bcdec.slice/crio-bf866c09c16668c744ac79a23c1cf12e6de363f2db5c9dfc2fa7ff3e44f22054 WatchSource:0}: Error finding container bf866c09c16668c744ac79a23c1cf12e6de363f2db5c9dfc2fa7ff3e44f22054: Status 404 returned error can't find the container with id bf866c09c16668c744ac79a23c1cf12e6de363f2db5c9dfc2fa7ff3e44f22054 Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.265672 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2"] Nov 24 17:14:41 crc kubenswrapper[4760]: W1124 17:14:41.271298 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc02c0ef6_ec2f_4554_89d5_95ccd5a9af05.slice/crio-955ec0bad52272c0762547142d5bab640bd15bc67fd9f510db525669ff1f3309 WatchSource:0}: Error finding container 955ec0bad52272c0762547142d5bab640bd15bc67fd9f510db525669ff1f3309: Status 404 returned error can't find the container with id 955ec0bad52272c0762547142d5bab640bd15bc67fd9f510db525669ff1f3309 Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.272705 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:41 crc kubenswrapper[4760]: I1124 17:14:41.679948 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6866949b48-d9pbn"] Nov 24 17:14:41 crc kubenswrapper[4760]: W1124 17:14:41.685052 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb2b8d44b_f031_456a_981a_b07204625a4b.slice/crio-7bade063a229f7a1da64e8f5f08c20a2a04263ed95ad464fd83ac819d2e3ac26 WatchSource:0}: Error finding container 7bade063a229f7a1da64e8f5f08c20a2a04263ed95ad464fd83ac819d2e3ac26: Status 404 returned error can't find the container with id 7bade063a229f7a1da64e8f5f08c20a2a04263ed95ad464fd83ac819d2e3ac26 Nov 24 17:14:42 crc kubenswrapper[4760]: I1124 17:14:42.008895 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2" event={"ID":"c02c0ef6-ec2f-4554-89d5-95ccd5a9af05","Type":"ContainerStarted","Data":"955ec0bad52272c0762547142d5bab640bd15bc67fd9f510db525669ff1f3309"} Nov 24 17:14:42 crc kubenswrapper[4760]: I1124 17:14:42.010829 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6866949b48-d9pbn" event={"ID":"b2b8d44b-f031-456a-981a-b07204625a4b","Type":"ContainerStarted","Data":"4ea7d1a239f791bfde4a8c0ef2665f5bd8c9fe1cdf912cd07de1b1df0bb1d44d"} Nov 24 17:14:42 crc kubenswrapper[4760]: I1124 17:14:42.010864 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6866949b48-d9pbn" event={"ID":"b2b8d44b-f031-456a-981a-b07204625a4b","Type":"ContainerStarted","Data":"7bade063a229f7a1da64e8f5f08c20a2a04263ed95ad464fd83ac819d2e3ac26"} Nov 24 17:14:42 crc kubenswrapper[4760]: I1124 17:14:42.012072 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" event={"ID":"2a8eb5c7-a2fa-4029-9d10-9ef82f358506","Type":"ContainerStarted","Data":"35ecc07ed83e625910b9f4dd11d87a221acafb28e8e2cf9866f9492893c9b238"} Nov 24 17:14:42 crc kubenswrapper[4760]: I1124 17:14:42.013111 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" 
event={"ID":"5228db69-23c1-48fa-a89f-a4e0459bcdec","Type":"ContainerStarted","Data":"bf866c09c16668c744ac79a23c1cf12e6de363f2db5c9dfc2fa7ff3e44f22054"} Nov 24 17:14:42 crc kubenswrapper[4760]: I1124 17:14:42.031360 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6866949b48-d9pbn" podStartSLOduration=2.03131119 podStartE2EDuration="2.03131119s" podCreationTimestamp="2025-11-24 17:14:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:14:42.031247018 +0000 UTC m=+677.354128588" watchObservedRunningTime="2025-11-24 17:14:42.03131119 +0000 UTC m=+677.354192750" Nov 24 17:14:45 crc kubenswrapper[4760]: I1124 17:14:45.046306 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" event={"ID":"2a8eb5c7-a2fa-4029-9d10-9ef82f358506","Type":"ContainerStarted","Data":"184a53582d17a35bbc2f586cf31e95ba1abfee83031d5300a031f2d391f45c03"} Nov 24 17:14:45 crc kubenswrapper[4760]: I1124 17:14:45.046980 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" Nov 24 17:14:45 crc kubenswrapper[4760]: I1124 17:14:45.049418 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2" event={"ID":"c02c0ef6-ec2f-4554-89d5-95ccd5a9af05","Type":"ContainerStarted","Data":"abc25654167defcae368a4bdf4f4d5cc5b1cf5976b0776a72e8ee675a07f7410"} Nov 24 17:14:45 crc kubenswrapper[4760]: I1124 17:14:45.051614 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" event={"ID":"5228db69-23c1-48fa-a89f-a4e0459bcdec","Type":"ContainerStarted","Data":"f01a3ec0db5894df09c44dc5c7ab41866b4061d5561800e9242c300a935e520e"} Nov 24 17:14:45 crc kubenswrapper[4760]: I1124 17:14:45.053708 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-nl86x" event={"ID":"fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3","Type":"ContainerStarted","Data":"0e18e3f6a25974147f98789027763f29b84d8859894d097792c0a68414e3af19"} Nov 24 17:14:45 crc kubenswrapper[4760]: I1124 17:14:45.054374 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:45 crc kubenswrapper[4760]: I1124 17:14:45.080868 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" podStartSLOduration=2.378040919 podStartE2EDuration="5.080823321s" podCreationTimestamp="2025-11-24 17:14:40 +0000 UTC" firstStartedPulling="2025-11-24 17:14:41.112218802 +0000 UTC m=+676.435100352" lastFinishedPulling="2025-11-24 17:14:43.815001164 +0000 UTC m=+679.137882754" observedRunningTime="2025-11-24 17:14:45.067767141 +0000 UTC m=+680.390648711" watchObservedRunningTime="2025-11-24 17:14:45.080823321 +0000 UTC m=+680.403704881" Nov 24 17:14:45 crc kubenswrapper[4760]: I1124 17:14:45.090882 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-w6tj7" podStartSLOduration=2.48148402 podStartE2EDuration="5.090861025s" podCreationTimestamp="2025-11-24 17:14:40 +0000 UTC" firstStartedPulling="2025-11-24 17:14:41.194674619 +0000 UTC m=+676.517556199" lastFinishedPulling="2025-11-24 17:14:43.804051644 +0000 UTC m=+679.126933204" observedRunningTime="2025-11-24 17:14:45.090541906 
+0000 UTC m=+680.413423496" watchObservedRunningTime="2025-11-24 17:14:45.090861025 +0000 UTC m=+680.413742585" Nov 24 17:14:45 crc kubenswrapper[4760]: I1124 17:14:45.111512 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-nl86x" podStartSLOduration=2.224696582 podStartE2EDuration="5.11149385s" podCreationTimestamp="2025-11-24 17:14:40 +0000 UTC" firstStartedPulling="2025-11-24 17:14:40.939654741 +0000 UTC m=+676.262536291" lastFinishedPulling="2025-11-24 17:14:43.826451969 +0000 UTC m=+679.149333559" observedRunningTime="2025-11-24 17:14:45.108557817 +0000 UTC m=+680.431439437" watchObservedRunningTime="2025-11-24 17:14:45.11149385 +0000 UTC m=+680.434375410" Nov 24 17:14:47 crc kubenswrapper[4760]: I1124 17:14:47.073171 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2" event={"ID":"c02c0ef6-ec2f-4554-89d5-95ccd5a9af05","Type":"ContainerStarted","Data":"4c360f36a871fb646fa07fbc3222590c2f3c5cfbfd7c76210d29ce5a1d69bce1"} Nov 24 17:14:47 crc kubenswrapper[4760]: I1124 17:14:47.102585 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-tgfj2" podStartSLOduration=1.988314822 podStartE2EDuration="7.10255329s" podCreationTimestamp="2025-11-24 17:14:40 +0000 UTC" firstStartedPulling="2025-11-24 17:14:41.273989737 +0000 UTC m=+676.596871287" lastFinishedPulling="2025-11-24 17:14:46.388228205 +0000 UTC m=+681.711109755" observedRunningTime="2025-11-24 17:14:47.094627716 +0000 UTC m=+682.417509316" watchObservedRunningTime="2025-11-24 17:14:47.10255329 +0000 UTC m=+682.425434880" Nov 24 17:14:50 crc kubenswrapper[4760]: I1124 17:14:50.941616 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-nl86x" Nov 24 17:14:51 crc kubenswrapper[4760]: I1124 17:14:51.273519 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:51 crc kubenswrapper[4760]: I1124 17:14:51.273599 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:51 crc kubenswrapper[4760]: I1124 17:14:51.281065 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:52 crc kubenswrapper[4760]: I1124 17:14:52.121359 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6866949b48-d9pbn" Nov 24 17:14:52 crc kubenswrapper[4760]: I1124 17:14:52.217119 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-qr42v"] Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.140044 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j"] Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.141868 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.145213 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.145483 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.159377 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j"] Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.290408 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-secret-volume\") pod \"collect-profiles-29400075-jdl8j\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.290455 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6v5s\" (UniqueName: \"kubernetes.io/projected/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-kube-api-access-w6v5s\") pod \"collect-profiles-29400075-jdl8j\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.290962 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-config-volume\") pod \"collect-profiles-29400075-jdl8j\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.392473 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-config-volume\") pod \"collect-profiles-29400075-jdl8j\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.392594 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-secret-volume\") pod \"collect-profiles-29400075-jdl8j\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.392654 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6v5s\" (UniqueName: \"kubernetes.io/projected/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-kube-api-access-w6v5s\") pod \"collect-profiles-29400075-jdl8j\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.395726 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-config-volume\") pod 
\"collect-profiles-29400075-jdl8j\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.405550 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-secret-volume\") pod \"collect-profiles-29400075-jdl8j\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.423751 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6v5s\" (UniqueName: \"kubernetes.io/projected/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-kube-api-access-w6v5s\") pod \"collect-profiles-29400075-jdl8j\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.483677 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.746698 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j"] Nov 24 17:15:00 crc kubenswrapper[4760]: W1124 17:15:00.756380 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53aed3e3_383a_4ca7_8c46_ba7a9b2fda0c.slice/crio-66dbc8e28608221cae00d9c0055033eb1a0e3daa2b2ad1bd67d176f6fe14d6bb WatchSource:0}: Error finding container 66dbc8e28608221cae00d9c0055033eb1a0e3daa2b2ad1bd67d176f6fe14d6bb: Status 404 returned error can't find the container with id 66dbc8e28608221cae00d9c0055033eb1a0e3daa2b2ad1bd67d176f6fe14d6bb Nov 24 17:15:00 crc kubenswrapper[4760]: I1124 17:15:00.865298 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-dgm2z" Nov 24 17:15:01 crc kubenswrapper[4760]: I1124 17:15:01.198794 4760 generic.go:334] "Generic (PLEG): container finished" podID="53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c" containerID="5b8ec8de9bfca3b92ae9a7c10d4576c96506bf9e28d6e5a320584102ea6835ca" exitCode=0 Nov 24 17:15:01 crc kubenswrapper[4760]: I1124 17:15:01.198883 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" event={"ID":"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c","Type":"ContainerDied","Data":"5b8ec8de9bfca3b92ae9a7c10d4576c96506bf9e28d6e5a320584102ea6835ca"} Nov 24 17:15:01 crc kubenswrapper[4760]: I1124 17:15:01.199168 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" event={"ID":"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c","Type":"ContainerStarted","Data":"66dbc8e28608221cae00d9c0055033eb1a0e3daa2b2ad1bd67d176f6fe14d6bb"} Nov 24 17:15:02 crc kubenswrapper[4760]: I1124 17:15:02.510334 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:02 crc kubenswrapper[4760]: I1124 17:15:02.627586 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-config-volume\") pod \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " Nov 24 17:15:02 crc kubenswrapper[4760]: I1124 17:15:02.627662 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-secret-volume\") pod \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " Nov 24 17:15:02 crc kubenswrapper[4760]: I1124 17:15:02.627680 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6v5s\" (UniqueName: \"kubernetes.io/projected/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-kube-api-access-w6v5s\") pod \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\" (UID: \"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c\") " Nov 24 17:15:02 crc kubenswrapper[4760]: I1124 17:15:02.628984 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-config-volume" (OuterVolumeSpecName: "config-volume") pod "53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c" (UID: "53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:15:02 crc kubenswrapper[4760]: I1124 17:15:02.633488 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c" (UID: "53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:15:02 crc kubenswrapper[4760]: I1124 17:15:02.633496 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-kube-api-access-w6v5s" (OuterVolumeSpecName: "kube-api-access-w6v5s") pod "53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c" (UID: "53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c"). InnerVolumeSpecName "kube-api-access-w6v5s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:15:02 crc kubenswrapper[4760]: I1124 17:15:02.729201 4760 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:02 crc kubenswrapper[4760]: I1124 17:15:02.729267 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6v5s\" (UniqueName: \"kubernetes.io/projected/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-kube-api-access-w6v5s\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:02 crc kubenswrapper[4760]: I1124 17:15:02.729295 4760 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:03 crc kubenswrapper[4760]: I1124 17:15:03.216865 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" event={"ID":"53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c","Type":"ContainerDied","Data":"66dbc8e28608221cae00d9c0055033eb1a0e3daa2b2ad1bd67d176f6fe14d6bb"} Nov 24 17:15:03 crc kubenswrapper[4760]: I1124 17:15:03.216939 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66dbc8e28608221cae00d9c0055033eb1a0e3daa2b2ad1bd67d176f6fe14d6bb" Nov 24 17:15:03 crc kubenswrapper[4760]: I1124 17:15:03.216954 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.274409 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-qr42v" podUID="4e26988e-e709-4bf3-81a3-8a4666e7e0da" containerName="console" containerID="cri-o://e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e" gracePeriod=15 Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.698911 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-qr42v_4e26988e-e709-4bf3-81a3-8a4666e7e0da/console/0.log" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.699225 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.855630 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-config\") pod \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.855698 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-serving-cert\") pod \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.855719 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-oauth-config\") pod \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.855786 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-oauth-serving-cert\") pod \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.855829 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-service-ca\") pod \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.855898 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-trusted-ca-bundle\") pod \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.856535 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-service-ca" (OuterVolumeSpecName: "service-ca") pod "4e26988e-e709-4bf3-81a3-8a4666e7e0da" (UID: "4e26988e-e709-4bf3-81a3-8a4666e7e0da"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.856576 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh8kn\" (UniqueName: \"kubernetes.io/projected/4e26988e-e709-4bf3-81a3-8a4666e7e0da-kube-api-access-mh8kn\") pod \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\" (UID: \"4e26988e-e709-4bf3-81a3-8a4666e7e0da\") " Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.856691 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-config" (OuterVolumeSpecName: "console-config") pod "4e26988e-e709-4bf3-81a3-8a4666e7e0da" (UID: "4e26988e-e709-4bf3-81a3-8a4666e7e0da"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.856769 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "4e26988e-e709-4bf3-81a3-8a4666e7e0da" (UID: "4e26988e-e709-4bf3-81a3-8a4666e7e0da"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.856801 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "4e26988e-e709-4bf3-81a3-8a4666e7e0da" (UID: "4e26988e-e709-4bf3-81a3-8a4666e7e0da"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.857180 4760 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.857198 4760 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.857208 4760 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.857217 4760 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.870291 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e26988e-e709-4bf3-81a3-8a4666e7e0da-kube-api-access-mh8kn" (OuterVolumeSpecName: "kube-api-access-mh8kn") pod "4e26988e-e709-4bf3-81a3-8a4666e7e0da" (UID: "4e26988e-e709-4bf3-81a3-8a4666e7e0da"). InnerVolumeSpecName "kube-api-access-mh8kn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.874461 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "4e26988e-e709-4bf3-81a3-8a4666e7e0da" (UID: "4e26988e-e709-4bf3-81a3-8a4666e7e0da"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.874796 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "4e26988e-e709-4bf3-81a3-8a4666e7e0da" (UID: "4e26988e-e709-4bf3-81a3-8a4666e7e0da"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.958086 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh8kn\" (UniqueName: \"kubernetes.io/projected/4e26988e-e709-4bf3-81a3-8a4666e7e0da-kube-api-access-mh8kn\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.958126 4760 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:17 crc kubenswrapper[4760]: I1124 17:15:17.958138 4760 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4e26988e-e709-4bf3-81a3-8a4666e7e0da-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.331509 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79"] Nov 24 17:15:18 crc kubenswrapper[4760]: E1124 17:15:18.331816 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e26988e-e709-4bf3-81a3-8a4666e7e0da" containerName="console" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.331839 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e26988e-e709-4bf3-81a3-8a4666e7e0da" containerName="console" Nov 24 17:15:18 crc kubenswrapper[4760]: E1124 17:15:18.331866 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c" containerName="collect-profiles" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.331878 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c" containerName="collect-profiles" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.332070 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c" containerName="collect-profiles" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.332094 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e26988e-e709-4bf3-81a3-8a4666e7e0da" containerName="console" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.331531 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-qr42v_4e26988e-e709-4bf3-81a3-8a4666e7e0da/console/0.log" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.332611 4760 generic.go:334] "Generic (PLEG): container finished" podID="4e26988e-e709-4bf3-81a3-8a4666e7e0da" containerID="e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e" exitCode=2 Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.332722 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-qr42v" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.333891 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qr42v" event={"ID":"4e26988e-e709-4bf3-81a3-8a4666e7e0da","Type":"ContainerDied","Data":"e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e"} Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.334171 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qr42v" event={"ID":"4e26988e-e709-4bf3-81a3-8a4666e7e0da","Type":"ContainerDied","Data":"e318057935b8a0e81b5a0efa6bb9921722e1215b4d5abc90b1c67081d704b758"} Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.334299 4760 scope.go:117] "RemoveContainer" containerID="e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.334135 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.337748 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.338067 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79"] Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.369509 4760 scope.go:117] "RemoveContainer" containerID="e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e" Nov 24 17:15:18 crc kubenswrapper[4760]: E1124 17:15:18.370162 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e\": container with ID starting with e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e not found: ID does not exist" containerID="e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.370201 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e"} err="failed to get container status \"e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e\": rpc error: code = NotFound desc = could not find container \"e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e\": container with ID starting with e6770347bd2b27b5ec5d43da52bf2ee2485b3e7f79b592b0cf461528c01fa74e not found: ID does not exist" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.377432 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.377504 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79\" (UID: 
\"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.377537 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tn84l\" (UniqueName: \"kubernetes.io/projected/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-kube-api-access-tn84l\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.381434 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-qr42v"] Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.385904 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-qr42v"] Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.478946 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.479066 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tn84l\" (UniqueName: \"kubernetes.io/projected/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-kube-api-access-tn84l\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.479215 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.479864 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.479952 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.509586 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tn84l\" (UniqueName: \"kubernetes.io/projected/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-kube-api-access-tn84l\") pod 
\"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.605638 4760 patch_prober.go:28] interesting pod/console-f9d7485db-qr42v container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.605715 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-f9d7485db-qr42v" podUID="4e26988e-e709-4bf3-81a3-8a4666e7e0da" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.667390 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:18 crc kubenswrapper[4760]: I1124 17:15:18.964948 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79"] Nov 24 17:15:18 crc kubenswrapper[4760]: W1124 17:15:18.980052 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea4e0bc2_6410_4a4a_9fca_104f010a54e7.slice/crio-55caae197bc7faced32ffd78b8e26be4e45906cc6cb24658d5e6d13525223d24 WatchSource:0}: Error finding container 55caae197bc7faced32ffd78b8e26be4e45906cc6cb24658d5e6d13525223d24: Status 404 returned error can't find the container with id 55caae197bc7faced32ffd78b8e26be4e45906cc6cb24658d5e6d13525223d24 Nov 24 17:15:19 crc kubenswrapper[4760]: I1124 17:15:19.345224 4760 generic.go:334] "Generic (PLEG): container finished" podID="ea4e0bc2-6410-4a4a-9fca-104f010a54e7" containerID="d6a79ba9f38e8156c0e23bcbc3d3d372f7d96fe39f348a5e3767704a15a0383d" exitCode=0 Nov 24 17:15:19 crc kubenswrapper[4760]: I1124 17:15:19.345355 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" event={"ID":"ea4e0bc2-6410-4a4a-9fca-104f010a54e7","Type":"ContainerDied","Data":"d6a79ba9f38e8156c0e23bcbc3d3d372f7d96fe39f348a5e3767704a15a0383d"} Nov 24 17:15:19 crc kubenswrapper[4760]: I1124 17:15:19.345860 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" event={"ID":"ea4e0bc2-6410-4a4a-9fca-104f010a54e7","Type":"ContainerStarted","Data":"55caae197bc7faced32ffd78b8e26be4e45906cc6cb24658d5e6d13525223d24"} Nov 24 17:15:19 crc kubenswrapper[4760]: I1124 17:15:19.481156 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e26988e-e709-4bf3-81a3-8a4666e7e0da" path="/var/lib/kubelet/pods/4e26988e-e709-4bf3-81a3-8a4666e7e0da/volumes" Nov 24 17:15:21 crc kubenswrapper[4760]: I1124 17:15:21.363314 4760 generic.go:334] "Generic (PLEG): container finished" podID="ea4e0bc2-6410-4a4a-9fca-104f010a54e7" containerID="a9e48f92ddf0e509d74490331044d2b0d818f98b217226adf874f7442927d766" exitCode=0 Nov 24 17:15:21 crc kubenswrapper[4760]: I1124 17:15:21.363396 4760 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" event={"ID":"ea4e0bc2-6410-4a4a-9fca-104f010a54e7","Type":"ContainerDied","Data":"a9e48f92ddf0e509d74490331044d2b0d818f98b217226adf874f7442927d766"} Nov 24 17:15:22 crc kubenswrapper[4760]: I1124 17:15:22.375841 4760 generic.go:334] "Generic (PLEG): container finished" podID="ea4e0bc2-6410-4a4a-9fca-104f010a54e7" containerID="5c93e76324640b57225e4e5cab4b5e29390b22b955607c7167f9c4fae8a089b7" exitCode=0 Nov 24 17:15:22 crc kubenswrapper[4760]: I1124 17:15:22.375899 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" event={"ID":"ea4e0bc2-6410-4a4a-9fca-104f010a54e7","Type":"ContainerDied","Data":"5c93e76324640b57225e4e5cab4b5e29390b22b955607c7167f9c4fae8a089b7"} Nov 24 17:15:23 crc kubenswrapper[4760]: I1124 17:15:23.753860 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:23 crc kubenswrapper[4760]: I1124 17:15:23.793559 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-util\") pod \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " Nov 24 17:15:23 crc kubenswrapper[4760]: I1124 17:15:23.793640 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tn84l\" (UniqueName: \"kubernetes.io/projected/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-kube-api-access-tn84l\") pod \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " Nov 24 17:15:23 crc kubenswrapper[4760]: I1124 17:15:23.793692 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-bundle\") pod \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\" (UID: \"ea4e0bc2-6410-4a4a-9fca-104f010a54e7\") " Nov 24 17:15:23 crc kubenswrapper[4760]: I1124 17:15:23.794999 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-bundle" (OuterVolumeSpecName: "bundle") pod "ea4e0bc2-6410-4a4a-9fca-104f010a54e7" (UID: "ea4e0bc2-6410-4a4a-9fca-104f010a54e7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:15:23 crc kubenswrapper[4760]: I1124 17:15:23.801621 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-kube-api-access-tn84l" (OuterVolumeSpecName: "kube-api-access-tn84l") pod "ea4e0bc2-6410-4a4a-9fca-104f010a54e7" (UID: "ea4e0bc2-6410-4a4a-9fca-104f010a54e7"). InnerVolumeSpecName "kube-api-access-tn84l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:15:23 crc kubenswrapper[4760]: I1124 17:15:23.813956 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-util" (OuterVolumeSpecName: "util") pod "ea4e0bc2-6410-4a4a-9fca-104f010a54e7" (UID: "ea4e0bc2-6410-4a4a-9fca-104f010a54e7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:15:23 crc kubenswrapper[4760]: I1124 17:15:23.895528 4760 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-util\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:23 crc kubenswrapper[4760]: I1124 17:15:23.895579 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tn84l\" (UniqueName: \"kubernetes.io/projected/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-kube-api-access-tn84l\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:23 crc kubenswrapper[4760]: I1124 17:15:23.895601 4760 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ea4e0bc2-6410-4a4a-9fca-104f010a54e7-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:15:24 crc kubenswrapper[4760]: I1124 17:15:24.394352 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" event={"ID":"ea4e0bc2-6410-4a4a-9fca-104f010a54e7","Type":"ContainerDied","Data":"55caae197bc7faced32ffd78b8e26be4e45906cc6cb24658d5e6d13525223d24"} Nov 24 17:15:24 crc kubenswrapper[4760]: I1124 17:15:24.394405 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="55caae197bc7faced32ffd78b8e26be4e45906cc6cb24658d5e6d13525223d24" Nov 24 17:15:24 crc kubenswrapper[4760]: I1124 17:15:24.394494 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.220190 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89"] Nov 24 17:15:34 crc kubenswrapper[4760]: E1124 17:15:34.220783 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea4e0bc2-6410-4a4a-9fca-104f010a54e7" containerName="extract" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.220796 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea4e0bc2-6410-4a4a-9fca-104f010a54e7" containerName="extract" Nov 24 17:15:34 crc kubenswrapper[4760]: E1124 17:15:34.220810 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea4e0bc2-6410-4a4a-9fca-104f010a54e7" containerName="pull" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.220816 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea4e0bc2-6410-4a4a-9fca-104f010a54e7" containerName="pull" Nov 24 17:15:34 crc kubenswrapper[4760]: E1124 17:15:34.220825 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea4e0bc2-6410-4a4a-9fca-104f010a54e7" containerName="util" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.220831 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea4e0bc2-6410-4a4a-9fca-104f010a54e7" containerName="util" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.220917 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea4e0bc2-6410-4a4a-9fca-104f010a54e7" containerName="extract" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.221316 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.223613 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.223986 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.224154 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-8f2vj" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.227144 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.227993 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.286824 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89"] Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.348225 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9608978e-3402-4aa6-97aa-c15d47a81890-webhook-cert\") pod \"metallb-operator-controller-manager-6cbb78f8d9-xsz89\" (UID: \"9608978e-3402-4aa6-97aa-c15d47a81890\") " pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.348379 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9608978e-3402-4aa6-97aa-c15d47a81890-apiservice-cert\") pod \"metallb-operator-controller-manager-6cbb78f8d9-xsz89\" (UID: \"9608978e-3402-4aa6-97aa-c15d47a81890\") " pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.348420 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g29qg\" (UniqueName: \"kubernetes.io/projected/9608978e-3402-4aa6-97aa-c15d47a81890-kube-api-access-g29qg\") pod \"metallb-operator-controller-manager-6cbb78f8d9-xsz89\" (UID: \"9608978e-3402-4aa6-97aa-c15d47a81890\") " pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.450094 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9608978e-3402-4aa6-97aa-c15d47a81890-apiservice-cert\") pod \"metallb-operator-controller-manager-6cbb78f8d9-xsz89\" (UID: \"9608978e-3402-4aa6-97aa-c15d47a81890\") " pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.450143 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g29qg\" (UniqueName: \"kubernetes.io/projected/9608978e-3402-4aa6-97aa-c15d47a81890-kube-api-access-g29qg\") pod \"metallb-operator-controller-manager-6cbb78f8d9-xsz89\" (UID: \"9608978e-3402-4aa6-97aa-c15d47a81890\") " pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.450219 
4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9608978e-3402-4aa6-97aa-c15d47a81890-webhook-cert\") pod \"metallb-operator-controller-manager-6cbb78f8d9-xsz89\" (UID: \"9608978e-3402-4aa6-97aa-c15d47a81890\") " pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.456063 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9608978e-3402-4aa6-97aa-c15d47a81890-webhook-cert\") pod \"metallb-operator-controller-manager-6cbb78f8d9-xsz89\" (UID: \"9608978e-3402-4aa6-97aa-c15d47a81890\") " pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.456124 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9608978e-3402-4aa6-97aa-c15d47a81890-apiservice-cert\") pod \"metallb-operator-controller-manager-6cbb78f8d9-xsz89\" (UID: \"9608978e-3402-4aa6-97aa-c15d47a81890\") " pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.463917 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g29qg\" (UniqueName: \"kubernetes.io/projected/9608978e-3402-4aa6-97aa-c15d47a81890-kube-api-access-g29qg\") pod \"metallb-operator-controller-manager-6cbb78f8d9-xsz89\" (UID: \"9608978e-3402-4aa6-97aa-c15d47a81890\") " pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.535590 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.560839 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7"] Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.562031 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.565391 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.565622 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.565849 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-547k4" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.584355 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7"] Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.652895 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9c952901-3384-4ff2-a54a-28b709c934a7-webhook-cert\") pod \"metallb-operator-webhook-server-558dddbf45-rtbw7\" (UID: \"9c952901-3384-4ff2-a54a-28b709c934a7\") " pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.652942 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v44x\" (UniqueName: \"kubernetes.io/projected/9c952901-3384-4ff2-a54a-28b709c934a7-kube-api-access-2v44x\") pod \"metallb-operator-webhook-server-558dddbf45-rtbw7\" (UID: \"9c952901-3384-4ff2-a54a-28b709c934a7\") " pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.652965 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9c952901-3384-4ff2-a54a-28b709c934a7-apiservice-cert\") pod \"metallb-operator-webhook-server-558dddbf45-rtbw7\" (UID: \"9c952901-3384-4ff2-a54a-28b709c934a7\") " pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.754161 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9c952901-3384-4ff2-a54a-28b709c934a7-webhook-cert\") pod \"metallb-operator-webhook-server-558dddbf45-rtbw7\" (UID: \"9c952901-3384-4ff2-a54a-28b709c934a7\") " pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.754215 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v44x\" (UniqueName: \"kubernetes.io/projected/9c952901-3384-4ff2-a54a-28b709c934a7-kube-api-access-2v44x\") pod \"metallb-operator-webhook-server-558dddbf45-rtbw7\" (UID: \"9c952901-3384-4ff2-a54a-28b709c934a7\") " pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.754237 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9c952901-3384-4ff2-a54a-28b709c934a7-apiservice-cert\") pod \"metallb-operator-webhook-server-558dddbf45-rtbw7\" (UID: \"9c952901-3384-4ff2-a54a-28b709c934a7\") " pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 
17:15:34.759676 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9c952901-3384-4ff2-a54a-28b709c934a7-apiservice-cert\") pod \"metallb-operator-webhook-server-558dddbf45-rtbw7\" (UID: \"9c952901-3384-4ff2-a54a-28b709c934a7\") " pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.776137 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9c952901-3384-4ff2-a54a-28b709c934a7-webhook-cert\") pod \"metallb-operator-webhook-server-558dddbf45-rtbw7\" (UID: \"9c952901-3384-4ff2-a54a-28b709c934a7\") " pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.785131 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89"] Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.788452 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v44x\" (UniqueName: \"kubernetes.io/projected/9c952901-3384-4ff2-a54a-28b709c934a7-kube-api-access-2v44x\") pod \"metallb-operator-webhook-server-558dddbf45-rtbw7\" (UID: \"9c952901-3384-4ff2-a54a-28b709c934a7\") " pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:34 crc kubenswrapper[4760]: W1124 17:15:34.791175 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9608978e_3402_4aa6_97aa_c15d47a81890.slice/crio-f170f5c3a1d5872cf97f026b5c74467b939c93e2f7e04c0420024cffdda908e2 WatchSource:0}: Error finding container f170f5c3a1d5872cf97f026b5c74467b939c93e2f7e04c0420024cffdda908e2: Status 404 returned error can't find the container with id f170f5c3a1d5872cf97f026b5c74467b939c93e2f7e04c0420024cffdda908e2 Nov 24 17:15:34 crc kubenswrapper[4760]: I1124 17:15:34.920564 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:35 crc kubenswrapper[4760]: I1124 17:15:35.174681 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7"] Nov 24 17:15:35 crc kubenswrapper[4760]: W1124 17:15:35.180700 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c952901_3384_4ff2_a54a_28b709c934a7.slice/crio-7b056c25066a0aad3e4eb8fd7c1e06a29292f3bc65ea5030d0c027d9931c6b58 WatchSource:0}: Error finding container 7b056c25066a0aad3e4eb8fd7c1e06a29292f3bc65ea5030d0c027d9931c6b58: Status 404 returned error can't find the container with id 7b056c25066a0aad3e4eb8fd7c1e06a29292f3bc65ea5030d0c027d9931c6b58 Nov 24 17:15:35 crc kubenswrapper[4760]: I1124 17:15:35.481438 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" event={"ID":"9608978e-3402-4aa6-97aa-c15d47a81890","Type":"ContainerStarted","Data":"f170f5c3a1d5872cf97f026b5c74467b939c93e2f7e04c0420024cffdda908e2"} Nov 24 17:15:35 crc kubenswrapper[4760]: I1124 17:15:35.483480 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" event={"ID":"9c952901-3384-4ff2-a54a-28b709c934a7","Type":"ContainerStarted","Data":"7b056c25066a0aad3e4eb8fd7c1e06a29292f3bc65ea5030d0c027d9931c6b58"} Nov 24 17:15:39 crc kubenswrapper[4760]: I1124 17:15:39.509402 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" event={"ID":"9c952901-3384-4ff2-a54a-28b709c934a7","Type":"ContainerStarted","Data":"ab981618c6e94878f4b7feec1ee6ca26ba39b308dc357ccb9282192b91198164"} Nov 24 17:15:39 crc kubenswrapper[4760]: I1124 17:15:39.510163 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:15:39 crc kubenswrapper[4760]: I1124 17:15:39.512320 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" event={"ID":"9608978e-3402-4aa6-97aa-c15d47a81890","Type":"ContainerStarted","Data":"2ee41ee357b005b3e5c951741336d7cf84799e180e439397f6d314de3205bf6d"} Nov 24 17:15:39 crc kubenswrapper[4760]: I1124 17:15:39.512492 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:15:39 crc kubenswrapper[4760]: I1124 17:15:39.530725 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" podStartSLOduration=1.7519680499999999 podStartE2EDuration="5.530705s" podCreationTimestamp="2025-11-24 17:15:34 +0000 UTC" firstStartedPulling="2025-11-24 17:15:35.183422856 +0000 UTC m=+730.506304406" lastFinishedPulling="2025-11-24 17:15:38.962159796 +0000 UTC m=+734.285041356" observedRunningTime="2025-11-24 17:15:39.528433216 +0000 UTC m=+734.851314806" watchObservedRunningTime="2025-11-24 17:15:39.530705 +0000 UTC m=+734.853586550" Nov 24 17:15:39 crc kubenswrapper[4760]: I1124 17:15:39.575321 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" podStartSLOduration=1.451600639 podStartE2EDuration="5.575265815s" 
podCreationTimestamp="2025-11-24 17:15:34 +0000 UTC" firstStartedPulling="2025-11-24 17:15:34.793166783 +0000 UTC m=+730.116048333" lastFinishedPulling="2025-11-24 17:15:38.916831949 +0000 UTC m=+734.239713509" observedRunningTime="2025-11-24 17:15:39.570564303 +0000 UTC m=+734.893445853" watchObservedRunningTime="2025-11-24 17:15:39.575265815 +0000 UTC m=+734.898147405" Nov 24 17:15:54 crc kubenswrapper[4760]: I1124 17:15:54.938522 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-558dddbf45-rtbw7" Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.549636 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dk7kw"] Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.551656 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.565725 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dk7kw"] Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.624131 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh848\" (UniqueName: \"kubernetes.io/projected/de1be6db-cf65-422d-b758-72ca53ebd713-kube-api-access-xh848\") pod \"redhat-operators-dk7kw\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.624216 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-catalog-content\") pod \"redhat-operators-dk7kw\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.624492 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-utilities\") pod \"redhat-operators-dk7kw\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.725626 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-utilities\") pod \"redhat-operators-dk7kw\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.725717 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh848\" (UniqueName: \"kubernetes.io/projected/de1be6db-cf65-422d-b758-72ca53ebd713-kube-api-access-xh848\") pod \"redhat-operators-dk7kw\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.725769 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-catalog-content\") pod \"redhat-operators-dk7kw\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:00 crc kubenswrapper[4760]: 
I1124 17:16:00.726308 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-utilities\") pod \"redhat-operators-dk7kw\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.726439 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-catalog-content\") pod \"redhat-operators-dk7kw\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.746508 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh848\" (UniqueName: \"kubernetes.io/projected/de1be6db-cf65-422d-b758-72ca53ebd713-kube-api-access-xh848\") pod \"redhat-operators-dk7kw\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:00 crc kubenswrapper[4760]: I1124 17:16:00.879512 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:01 crc kubenswrapper[4760]: I1124 17:16:01.328507 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dk7kw"] Nov 24 17:16:01 crc kubenswrapper[4760]: I1124 17:16:01.681839 4760 generic.go:334] "Generic (PLEG): container finished" podID="de1be6db-cf65-422d-b758-72ca53ebd713" containerID="44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a" exitCode=0 Nov 24 17:16:01 crc kubenswrapper[4760]: I1124 17:16:01.681879 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dk7kw" event={"ID":"de1be6db-cf65-422d-b758-72ca53ebd713","Type":"ContainerDied","Data":"44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a"} Nov 24 17:16:01 crc kubenswrapper[4760]: I1124 17:16:01.681902 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dk7kw" event={"ID":"de1be6db-cf65-422d-b758-72ca53ebd713","Type":"ContainerStarted","Data":"d35ecb18e498e3d4b185063784d9146c386dec676438dadbd3e2799dfca0ffbb"} Nov 24 17:16:02 crc kubenswrapper[4760]: I1124 17:16:02.692845 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dk7kw" event={"ID":"de1be6db-cf65-422d-b758-72ca53ebd713","Type":"ContainerStarted","Data":"fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414"} Nov 24 17:16:03 crc kubenswrapper[4760]: I1124 17:16:03.375160 4760 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 24 17:16:03 crc kubenswrapper[4760]: I1124 17:16:03.698433 4760 generic.go:334] "Generic (PLEG): container finished" podID="de1be6db-cf65-422d-b758-72ca53ebd713" containerID="fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414" exitCode=0 Nov 24 17:16:03 crc kubenswrapper[4760]: I1124 17:16:03.698474 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dk7kw" event={"ID":"de1be6db-cf65-422d-b758-72ca53ebd713","Type":"ContainerDied","Data":"fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414"} Nov 24 17:16:04 crc kubenswrapper[4760]: I1124 17:16:04.708962 4760 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/redhat-operators-dk7kw" event={"ID":"de1be6db-cf65-422d-b758-72ca53ebd713","Type":"ContainerStarted","Data":"fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9"} Nov 24 17:16:04 crc kubenswrapper[4760]: I1124 17:16:04.732304 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dk7kw" podStartSLOduration=2.283597535 podStartE2EDuration="4.732289871s" podCreationTimestamp="2025-11-24 17:16:00 +0000 UTC" firstStartedPulling="2025-11-24 17:16:01.683803681 +0000 UTC m=+757.006685221" lastFinishedPulling="2025-11-24 17:16:04.132495997 +0000 UTC m=+759.455377557" observedRunningTime="2025-11-24 17:16:04.730332376 +0000 UTC m=+760.053213946" watchObservedRunningTime="2025-11-24 17:16:04.732289871 +0000 UTC m=+760.055171421" Nov 24 17:16:05 crc kubenswrapper[4760]: I1124 17:16:05.642368 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:16:05 crc kubenswrapper[4760]: I1124 17:16:05.642470 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:16:10 crc kubenswrapper[4760]: I1124 17:16:10.879933 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:10 crc kubenswrapper[4760]: I1124 17:16:10.880921 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:10 crc kubenswrapper[4760]: I1124 17:16:10.954692 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:11 crc kubenswrapper[4760]: I1124 17:16:11.842262 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:11 crc kubenswrapper[4760]: I1124 17:16:11.907529 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dk7kw"] Nov 24 17:16:13 crc kubenswrapper[4760]: I1124 17:16:13.778818 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dk7kw" podUID="de1be6db-cf65-422d-b758-72ca53ebd713" containerName="registry-server" containerID="cri-o://fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9" gracePeriod=2 Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.270870 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.329772 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-utilities\") pod \"de1be6db-cf65-422d-b758-72ca53ebd713\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.329892 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-catalog-content\") pod \"de1be6db-cf65-422d-b758-72ca53ebd713\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.330047 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xh848\" (UniqueName: \"kubernetes.io/projected/de1be6db-cf65-422d-b758-72ca53ebd713-kube-api-access-xh848\") pod \"de1be6db-cf65-422d-b758-72ca53ebd713\" (UID: \"de1be6db-cf65-422d-b758-72ca53ebd713\") " Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.331867 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-utilities" (OuterVolumeSpecName: "utilities") pod "de1be6db-cf65-422d-b758-72ca53ebd713" (UID: "de1be6db-cf65-422d-b758-72ca53ebd713"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.340098 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de1be6db-cf65-422d-b758-72ca53ebd713-kube-api-access-xh848" (OuterVolumeSpecName: "kube-api-access-xh848") pod "de1be6db-cf65-422d-b758-72ca53ebd713" (UID: "de1be6db-cf65-422d-b758-72ca53ebd713"). InnerVolumeSpecName "kube-api-access-xh848". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.423639 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de1be6db-cf65-422d-b758-72ca53ebd713" (UID: "de1be6db-cf65-422d-b758-72ca53ebd713"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.431968 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.431995 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de1be6db-cf65-422d-b758-72ca53ebd713-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.432030 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xh848\" (UniqueName: \"kubernetes.io/projected/de1be6db-cf65-422d-b758-72ca53ebd713-kube-api-access-xh848\") on node \"crc\" DevicePath \"\"" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.539395 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6cbb78f8d9-xsz89" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.786299 4760 generic.go:334] "Generic (PLEG): container finished" podID="de1be6db-cf65-422d-b758-72ca53ebd713" containerID="fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9" exitCode=0 Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.786360 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dk7kw" event={"ID":"de1be6db-cf65-422d-b758-72ca53ebd713","Type":"ContainerDied","Data":"fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9"} Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.786386 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dk7kw" event={"ID":"de1be6db-cf65-422d-b758-72ca53ebd713","Type":"ContainerDied","Data":"d35ecb18e498e3d4b185063784d9146c386dec676438dadbd3e2799dfca0ffbb"} Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.786404 4760 scope.go:117] "RemoveContainer" containerID="fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.786434 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dk7kw" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.805424 4760 scope.go:117] "RemoveContainer" containerID="fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.813635 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dk7kw"] Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.817569 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dk7kw"] Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.884803 4760 scope.go:117] "RemoveContainer" containerID="44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.907182 4760 scope.go:117] "RemoveContainer" containerID="fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9" Nov 24 17:16:14 crc kubenswrapper[4760]: E1124 17:16:14.907609 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9\": container with ID starting with fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9 not found: ID does not exist" containerID="fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.907647 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9"} err="failed to get container status \"fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9\": rpc error: code = NotFound desc = could not find container \"fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9\": container with ID starting with fad638e61ff09d21c17b33292cc35205398c08ceb8c21a8c38a965793341e8c9 not found: ID does not exist" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.907673 4760 scope.go:117] "RemoveContainer" containerID="fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414" Nov 24 17:16:14 crc kubenswrapper[4760]: E1124 17:16:14.908049 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414\": container with ID starting with fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414 not found: ID does not exist" containerID="fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.908079 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414"} err="failed to get container status \"fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414\": rpc error: code = NotFound desc = could not find container \"fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414\": container with ID starting with fe3cdc981a224d3760dd54be043dd38cc123dbb683a8841be38b8dc4bc102414 not found: ID does not exist" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.908096 4760 scope.go:117] "RemoveContainer" containerID="44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a" Nov 24 17:16:14 crc kubenswrapper[4760]: E1124 17:16:14.908438 4760 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a\": container with ID starting with 44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a not found: ID does not exist" containerID="44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a" Nov 24 17:16:14 crc kubenswrapper[4760]: I1124 17:16:14.908504 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a"} err="failed to get container status \"44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a\": rpc error: code = NotFound desc = could not find container \"44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a\": container with ID starting with 44ec4e0bc370280cd0c87ddd4c47fd15e311becc67baf513767c14ab0648449a not found: ID does not exist" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.279156 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-qk694"] Nov 24 17:16:15 crc kubenswrapper[4760]: E1124 17:16:15.279743 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de1be6db-cf65-422d-b758-72ca53ebd713" containerName="extract-content" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.279760 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="de1be6db-cf65-422d-b758-72ca53ebd713" containerName="extract-content" Nov 24 17:16:15 crc kubenswrapper[4760]: E1124 17:16:15.279774 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de1be6db-cf65-422d-b758-72ca53ebd713" containerName="registry-server" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.279781 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="de1be6db-cf65-422d-b758-72ca53ebd713" containerName="registry-server" Nov 24 17:16:15 crc kubenswrapper[4760]: E1124 17:16:15.279805 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de1be6db-cf65-422d-b758-72ca53ebd713" containerName="extract-utilities" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.279814 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="de1be6db-cf65-422d-b758-72ca53ebd713" containerName="extract-utilities" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.279947 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="de1be6db-cf65-422d-b758-72ca53ebd713" containerName="registry-server" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.280431 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.287584 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-zhflx" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.287639 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.290552 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-j4nl8"] Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.301547 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-qk694"] Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.301678 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.307219 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.308177 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.344165 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9debe41b-d028-4243-be0e-8d191f93d290-metrics-certs\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.344311 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh7r7\" (UniqueName: \"kubernetes.io/projected/858bafc0-44a3-4e65-9a8f-0da3e8d6f624-kube-api-access-hh7r7\") pod \"frr-k8s-webhook-server-6998585d5-qk694\" (UID: \"858bafc0-44a3-4e65-9a8f-0da3e8d6f624\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.344349 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtv4g\" (UniqueName: \"kubernetes.io/projected/9debe41b-d028-4243-be0e-8d191f93d290-kube-api-access-rtv4g\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.344402 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9debe41b-d028-4243-be0e-8d191f93d290-frr-startup\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.344435 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-metrics\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.344468 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/858bafc0-44a3-4e65-9a8f-0da3e8d6f624-cert\") pod \"frr-k8s-webhook-server-6998585d5-qk694\" (UID: \"858bafc0-44a3-4e65-9a8f-0da3e8d6f624\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.344625 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-reloader\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.344689 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-frr-conf\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: 
I1124 17:16:15.344731 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-frr-sockets\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.378764 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-z7h4x"] Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.379561 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.381241 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.382359 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.383057 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.383907 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-rzjmg" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.403837 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-r22lk"] Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.405043 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.408810 4760 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.412851 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-r22lk"] Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.445875 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/858bafc0-44a3-4e65-9a8f-0da3e8d6f624-cert\") pod \"frr-k8s-webhook-server-6998585d5-qk694\" (UID: \"858bafc0-44a3-4e65-9a8f-0da3e8d6f624\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.445929 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-reloader\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.445954 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b2d37ad6-a6ac-4c40-82e2-4eb9319e9244-metrics-certs\") pod \"controller-6c7b4b5f48-r22lk\" (UID: \"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244\") " pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.445974 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m88hk\" (UniqueName: \"kubernetes.io/projected/b2d37ad6-a6ac-4c40-82e2-4eb9319e9244-kube-api-access-m88hk\") pod \"controller-6c7b4b5f48-r22lk\" (UID: 
\"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244\") " pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.445994 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-frr-conf\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446023 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-memberlist\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446040 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-frr-sockets\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446066 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2d37ad6-a6ac-4c40-82e2-4eb9319e9244-cert\") pod \"controller-6c7b4b5f48-r22lk\" (UID: \"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244\") " pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446106 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-222gk\" (UniqueName: \"kubernetes.io/projected/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-kube-api-access-222gk\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446132 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9debe41b-d028-4243-be0e-8d191f93d290-metrics-certs\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446160 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-metrics-certs\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446180 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtv4g\" (UniqueName: \"kubernetes.io/projected/9debe41b-d028-4243-be0e-8d191f93d290-kube-api-access-rtv4g\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446196 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh7r7\" (UniqueName: \"kubernetes.io/projected/858bafc0-44a3-4e65-9a8f-0da3e8d6f624-kube-api-access-hh7r7\") pod \"frr-k8s-webhook-server-6998585d5-qk694\" (UID: \"858bafc0-44a3-4e65-9a8f-0da3e8d6f624\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 
17:16:15.446237 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9debe41b-d028-4243-be0e-8d191f93d290-frr-startup\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446258 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-metrics\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446273 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-metallb-excludel2\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446390 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-reloader\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.446499 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-frr-conf\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: E1124 17:16:15.446513 4760 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Nov 24 17:16:15 crc kubenswrapper[4760]: E1124 17:16:15.446573 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9debe41b-d028-4243-be0e-8d191f93d290-metrics-certs podName:9debe41b-d028-4243-be0e-8d191f93d290 nodeName:}" failed. No retries permitted until 2025-11-24 17:16:15.946543202 +0000 UTC m=+771.269424752 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9debe41b-d028-4243-be0e-8d191f93d290-metrics-certs") pod "frr-k8s-j4nl8" (UID: "9debe41b-d028-4243-be0e-8d191f93d290") : secret "frr-k8s-certs-secret" not found Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.447042 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-frr-sockets\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.447286 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9debe41b-d028-4243-be0e-8d191f93d290-frr-startup\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.447612 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9debe41b-d028-4243-be0e-8d191f93d290-metrics\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.453679 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/858bafc0-44a3-4e65-9a8f-0da3e8d6f624-cert\") pod \"frr-k8s-webhook-server-6998585d5-qk694\" (UID: \"858bafc0-44a3-4e65-9a8f-0da3e8d6f624\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.460991 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh7r7\" (UniqueName: \"kubernetes.io/projected/858bafc0-44a3-4e65-9a8f-0da3e8d6f624-kube-api-access-hh7r7\") pod \"frr-k8s-webhook-server-6998585d5-qk694\" (UID: \"858bafc0-44a3-4e65-9a8f-0da3e8d6f624\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.467906 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtv4g\" (UniqueName: \"kubernetes.io/projected/9debe41b-d028-4243-be0e-8d191f93d290-kube-api-access-rtv4g\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.473961 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de1be6db-cf65-422d-b758-72ca53ebd713" path="/var/lib/kubelet/pods/de1be6db-cf65-422d-b758-72ca53ebd713/volumes" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.548264 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-222gk\" (UniqueName: \"kubernetes.io/projected/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-kube-api-access-222gk\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.548406 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-metrics-certs\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.548454 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-metallb-excludel2\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.548508 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b2d37ad6-a6ac-4c40-82e2-4eb9319e9244-metrics-certs\") pod \"controller-6c7b4b5f48-r22lk\" (UID: \"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244\") " pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.548530 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m88hk\" (UniqueName: \"kubernetes.io/projected/b2d37ad6-a6ac-4c40-82e2-4eb9319e9244-kube-api-access-m88hk\") pod \"controller-6c7b4b5f48-r22lk\" (UID: \"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244\") " pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.548559 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-memberlist\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.548578 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2d37ad6-a6ac-4c40-82e2-4eb9319e9244-cert\") pod \"controller-6c7b4b5f48-r22lk\" (UID: \"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244\") " pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.549808 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-metallb-excludel2\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: E1124 17:16:15.549142 4760 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 24 17:16:15 crc kubenswrapper[4760]: E1124 17:16:15.550049 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-memberlist podName:576e8cbe-2d96-43c5-a62c-d4f22abdc21a nodeName:}" failed. No retries permitted until 2025-11-24 17:16:16.050031628 +0000 UTC m=+771.372913178 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-memberlist") pod "speaker-z7h4x" (UID: "576e8cbe-2d96-43c5-a62c-d4f22abdc21a") : secret "metallb-memberlist" not found Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.551846 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-metrics-certs\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.553142 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b2d37ad6-a6ac-4c40-82e2-4eb9319e9244-metrics-certs\") pod \"controller-6c7b4b5f48-r22lk\" (UID: \"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244\") " pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.562466 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2d37ad6-a6ac-4c40-82e2-4eb9319e9244-cert\") pod \"controller-6c7b4b5f48-r22lk\" (UID: \"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244\") " pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.565219 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m88hk\" (UniqueName: \"kubernetes.io/projected/b2d37ad6-a6ac-4c40-82e2-4eb9319e9244-kube-api-access-m88hk\") pod \"controller-6c7b4b5f48-r22lk\" (UID: \"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244\") " pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.567314 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-222gk\" (UniqueName: \"kubernetes.io/projected/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-kube-api-access-222gk\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.594362 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.717045 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.943882 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-r22lk"] Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.954098 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9debe41b-d028-4243-be0e-8d191f93d290-metrics-certs\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:15 crc kubenswrapper[4760]: I1124 17:16:15.959829 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9debe41b-d028-4243-be0e-8d191f93d290-metrics-certs\") pod \"frr-k8s-j4nl8\" (UID: \"9debe41b-d028-4243-be0e-8d191f93d290\") " pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:16 crc kubenswrapper[4760]: I1124 17:16:16.029148 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-qk694"] Nov 24 17:16:16 crc kubenswrapper[4760]: W1124 17:16:16.042683 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod858bafc0_44a3_4e65_9a8f_0da3e8d6f624.slice/crio-b0ccc128a3403984d3306cac787fa1e7fbc7b6f509c60f0a0a42ba4230be1b0d WatchSource:0}: Error finding container b0ccc128a3403984d3306cac787fa1e7fbc7b6f509c60f0a0a42ba4230be1b0d: Status 404 returned error can't find the container with id b0ccc128a3403984d3306cac787fa1e7fbc7b6f509c60f0a0a42ba4230be1b0d Nov 24 17:16:16 crc kubenswrapper[4760]: I1124 17:16:16.055293 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-memberlist\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:16 crc kubenswrapper[4760]: E1124 17:16:16.055467 4760 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 24 17:16:16 crc kubenswrapper[4760]: E1124 17:16:16.055539 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-memberlist podName:576e8cbe-2d96-43c5-a62c-d4f22abdc21a nodeName:}" failed. No retries permitted until 2025-11-24 17:16:17.055520866 +0000 UTC m=+772.378402416 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-memberlist") pod "speaker-z7h4x" (UID: "576e8cbe-2d96-43c5-a62c-d4f22abdc21a") : secret "metallb-memberlist" not found Nov 24 17:16:16 crc kubenswrapper[4760]: I1124 17:16:16.214483 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:16 crc kubenswrapper[4760]: I1124 17:16:16.803334 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-j4nl8" event={"ID":"9debe41b-d028-4243-be0e-8d191f93d290","Type":"ContainerStarted","Data":"5d17800a198bc9e09765561ad97bc8d482c37ad8d3e20e4e326edd0d7bd54159"} Nov 24 17:16:16 crc kubenswrapper[4760]: I1124 17:16:16.805784 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" event={"ID":"858bafc0-44a3-4e65-9a8f-0da3e8d6f624","Type":"ContainerStarted","Data":"b0ccc128a3403984d3306cac787fa1e7fbc7b6f509c60f0a0a42ba4230be1b0d"} Nov 24 17:16:16 crc kubenswrapper[4760]: I1124 17:16:16.808243 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-r22lk" event={"ID":"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244","Type":"ContainerStarted","Data":"ced73270b699daba93ff8edccb41692b5f68e25b36cf5e58bafe0652af095fb7"} Nov 24 17:16:16 crc kubenswrapper[4760]: I1124 17:16:16.808277 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-r22lk" event={"ID":"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244","Type":"ContainerStarted","Data":"246d6a64533b4ec9d02893259223d67c82a27cdb0b049295c7120502beb22e16"} Nov 24 17:16:16 crc kubenswrapper[4760]: I1124 17:16:16.808289 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-r22lk" event={"ID":"b2d37ad6-a6ac-4c40-82e2-4eb9319e9244","Type":"ContainerStarted","Data":"c11e0b0f91a29549ec3bb4522aaf3b55e415e5c0d3deb19d31b48c29c15529a8"} Nov 24 17:16:16 crc kubenswrapper[4760]: I1124 17:16:16.808524 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:16 crc kubenswrapper[4760]: I1124 17:16:16.835719 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-r22lk" podStartSLOduration=1.8356958030000001 podStartE2EDuration="1.835695803s" podCreationTimestamp="2025-11-24 17:16:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:16:16.829122578 +0000 UTC m=+772.152004158" watchObservedRunningTime="2025-11-24 17:16:16.835695803 +0000 UTC m=+772.158577393" Nov 24 17:16:17 crc kubenswrapper[4760]: I1124 17:16:17.069891 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-memberlist\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:17 crc kubenswrapper[4760]: I1124 17:16:17.082253 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/576e8cbe-2d96-43c5-a62c-d4f22abdc21a-memberlist\") pod \"speaker-z7h4x\" (UID: \"576e8cbe-2d96-43c5-a62c-d4f22abdc21a\") " pod="metallb-system/speaker-z7h4x" Nov 24 17:16:17 crc kubenswrapper[4760]: I1124 17:16:17.202869 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-z7h4x" Nov 24 17:16:17 crc kubenswrapper[4760]: W1124 17:16:17.240317 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod576e8cbe_2d96_43c5_a62c_d4f22abdc21a.slice/crio-b3acff06c66b4dadab299b7eed9a79b67fbdbb23fa74a43e7d9cad7f9254a8cd WatchSource:0}: Error finding container b3acff06c66b4dadab299b7eed9a79b67fbdbb23fa74a43e7d9cad7f9254a8cd: Status 404 returned error can't find the container with id b3acff06c66b4dadab299b7eed9a79b67fbdbb23fa74a43e7d9cad7f9254a8cd Nov 24 17:16:17 crc kubenswrapper[4760]: I1124 17:16:17.815720 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-z7h4x" event={"ID":"576e8cbe-2d96-43c5-a62c-d4f22abdc21a","Type":"ContainerStarted","Data":"2c56671e1b7c1db0758bafc3715b4983dd3d5297332acf57dc2442301e07db25"} Nov 24 17:16:17 crc kubenswrapper[4760]: I1124 17:16:17.815767 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-z7h4x" event={"ID":"576e8cbe-2d96-43c5-a62c-d4f22abdc21a","Type":"ContainerStarted","Data":"c9112364e62caff86792780b38323b3dada07f907c90941123e69fdc799808a7"} Nov 24 17:16:17 crc kubenswrapper[4760]: I1124 17:16:17.815793 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-z7h4x" event={"ID":"576e8cbe-2d96-43c5-a62c-d4f22abdc21a","Type":"ContainerStarted","Data":"b3acff06c66b4dadab299b7eed9a79b67fbdbb23fa74a43e7d9cad7f9254a8cd"} Nov 24 17:16:17 crc kubenswrapper[4760]: I1124 17:16:17.816050 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-z7h4x" Nov 24 17:16:17 crc kubenswrapper[4760]: I1124 17:16:17.834653 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-z7h4x" podStartSLOduration=2.8346368010000003 podStartE2EDuration="2.834636801s" podCreationTimestamp="2025-11-24 17:16:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:16:17.830745131 +0000 UTC m=+773.153626691" watchObservedRunningTime="2025-11-24 17:16:17.834636801 +0000 UTC m=+773.157518351" Nov 24 17:16:23 crc kubenswrapper[4760]: I1124 17:16:23.892606 4760 generic.go:334] "Generic (PLEG): container finished" podID="9debe41b-d028-4243-be0e-8d191f93d290" containerID="b90462107558c0ca370b5783407263cadce8af3a161bd61bab259c063f621698" exitCode=0 Nov 24 17:16:23 crc kubenswrapper[4760]: I1124 17:16:23.892693 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-j4nl8" event={"ID":"9debe41b-d028-4243-be0e-8d191f93d290","Type":"ContainerDied","Data":"b90462107558c0ca370b5783407263cadce8af3a161bd61bab259c063f621698"} Nov 24 17:16:23 crc kubenswrapper[4760]: I1124 17:16:23.895339 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" event={"ID":"858bafc0-44a3-4e65-9a8f-0da3e8d6f624","Type":"ContainerStarted","Data":"ba552e0d00ff9b3475898ab2a9ad042505a860265f961acd5f54c5381ab2d6e0"} Nov 24 17:16:23 crc kubenswrapper[4760]: I1124 17:16:23.895537 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" Nov 24 17:16:23 crc kubenswrapper[4760]: I1124 17:16:23.959936 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" podStartSLOduration=2.111919667 
podStartE2EDuration="8.959919482s" podCreationTimestamp="2025-11-24 17:16:15 +0000 UTC" firstStartedPulling="2025-11-24 17:16:16.045292798 +0000 UTC m=+771.368174358" lastFinishedPulling="2025-11-24 17:16:22.893292583 +0000 UTC m=+778.216174173" observedRunningTime="2025-11-24 17:16:23.948882162 +0000 UTC m=+779.271763712" watchObservedRunningTime="2025-11-24 17:16:23.959919482 +0000 UTC m=+779.282801032" Nov 24 17:16:24 crc kubenswrapper[4760]: I1124 17:16:24.905504 4760 generic.go:334] "Generic (PLEG): container finished" podID="9debe41b-d028-4243-be0e-8d191f93d290" containerID="77f07bf1b9aa64820a661e0a20d5a53d3d9f1e48b30df7465e293fcf680c3925" exitCode=0 Nov 24 17:16:24 crc kubenswrapper[4760]: I1124 17:16:24.907165 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-j4nl8" event={"ID":"9debe41b-d028-4243-be0e-8d191f93d290","Type":"ContainerDied","Data":"77f07bf1b9aa64820a661e0a20d5a53d3d9f1e48b30df7465e293fcf680c3925"} Nov 24 17:16:25 crc kubenswrapper[4760]: I1124 17:16:25.914817 4760 generic.go:334] "Generic (PLEG): container finished" podID="9debe41b-d028-4243-be0e-8d191f93d290" containerID="f63b154441d4dab35f66db1420986e16c248ba63fb4110fae387572782db4a84" exitCode=0 Nov 24 17:16:25 crc kubenswrapper[4760]: I1124 17:16:25.914913 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-j4nl8" event={"ID":"9debe41b-d028-4243-be0e-8d191f93d290","Type":"ContainerDied","Data":"f63b154441d4dab35f66db1420986e16c248ba63fb4110fae387572782db4a84"} Nov 24 17:16:26 crc kubenswrapper[4760]: I1124 17:16:26.927837 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-j4nl8" event={"ID":"9debe41b-d028-4243-be0e-8d191f93d290","Type":"ContainerStarted","Data":"9fa648ed8fa40fda5dbcbf00eb07efe436ccd0b8d98506e885d7d82e53fbbcf7"} Nov 24 17:16:26 crc kubenswrapper[4760]: I1124 17:16:26.928227 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-j4nl8" event={"ID":"9debe41b-d028-4243-be0e-8d191f93d290","Type":"ContainerStarted","Data":"85bc7e52071f293946e866368b14ec3357b3ce19059e31a8accab6afd453311a"} Nov 24 17:16:26 crc kubenswrapper[4760]: I1124 17:16:26.928248 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-j4nl8" event={"ID":"9debe41b-d028-4243-be0e-8d191f93d290","Type":"ContainerStarted","Data":"a00a8785dfcdfbaae9d2895a200944af7c0a9a239a5bdb7cffb9fa074caef8df"} Nov 24 17:16:26 crc kubenswrapper[4760]: I1124 17:16:26.928267 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-j4nl8" event={"ID":"9debe41b-d028-4243-be0e-8d191f93d290","Type":"ContainerStarted","Data":"16d1e438b691d8e3fe1534983ff857b10175d6b7cd2703afca95327c90238269"} Nov 24 17:16:27 crc kubenswrapper[4760]: I1124 17:16:27.210786 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-z7h4x" Nov 24 17:16:27 crc kubenswrapper[4760]: I1124 17:16:27.939109 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-j4nl8" event={"ID":"9debe41b-d028-4243-be0e-8d191f93d290","Type":"ContainerStarted","Data":"77f764ae1bacce8406a51c0cf806bea05f2045c7016a26157c0bc9f276e85e2c"} Nov 24 17:16:27 crc kubenswrapper[4760]: I1124 17:16:27.939414 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:27 crc kubenswrapper[4760]: I1124 17:16:27.939425 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-j4nl8" 
event={"ID":"9debe41b-d028-4243-be0e-8d191f93d290","Type":"ContainerStarted","Data":"0999a41314d2f43b9a8f6778bd3ba2e83a0ef76d729c51203d1a1ab6932104af"} Nov 24 17:16:29 crc kubenswrapper[4760]: I1124 17:16:29.958688 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-j4nl8" podStartSLOduration=8.337379832 podStartE2EDuration="14.958664631s" podCreationTimestamp="2025-11-24 17:16:15 +0000 UTC" firstStartedPulling="2025-11-24 17:16:16.33149145 +0000 UTC m=+771.654373010" lastFinishedPulling="2025-11-24 17:16:22.952776249 +0000 UTC m=+778.275657809" observedRunningTime="2025-11-24 17:16:27.998842595 +0000 UTC m=+783.321724155" watchObservedRunningTime="2025-11-24 17:16:29.958664631 +0000 UTC m=+785.281546191" Nov 24 17:16:29 crc kubenswrapper[4760]: I1124 17:16:29.960737 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-tqscj"] Nov 24 17:16:29 crc kubenswrapper[4760]: I1124 17:16:29.961545 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-tqscj" Nov 24 17:16:29 crc kubenswrapper[4760]: I1124 17:16:29.964147 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-rnzmm" Nov 24 17:16:29 crc kubenswrapper[4760]: I1124 17:16:29.964202 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 24 17:16:29 crc kubenswrapper[4760]: I1124 17:16:29.965089 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 24 17:16:29 crc kubenswrapper[4760]: I1124 17:16:29.996390 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-tqscj"] Nov 24 17:16:30 crc kubenswrapper[4760]: I1124 17:16:30.091537 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ffb6\" (UniqueName: \"kubernetes.io/projected/8cf6e933-ef07-4998-9ce9-2470609096ba-kube-api-access-7ffb6\") pod \"openstack-operator-index-tqscj\" (UID: \"8cf6e933-ef07-4998-9ce9-2470609096ba\") " pod="openstack-operators/openstack-operator-index-tqscj" Nov 24 17:16:30 crc kubenswrapper[4760]: I1124 17:16:30.192520 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ffb6\" (UniqueName: \"kubernetes.io/projected/8cf6e933-ef07-4998-9ce9-2470609096ba-kube-api-access-7ffb6\") pod \"openstack-operator-index-tqscj\" (UID: \"8cf6e933-ef07-4998-9ce9-2470609096ba\") " pod="openstack-operators/openstack-operator-index-tqscj" Nov 24 17:16:30 crc kubenswrapper[4760]: I1124 17:16:30.212312 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ffb6\" (UniqueName: \"kubernetes.io/projected/8cf6e933-ef07-4998-9ce9-2470609096ba-kube-api-access-7ffb6\") pod \"openstack-operator-index-tqscj\" (UID: \"8cf6e933-ef07-4998-9ce9-2470609096ba\") " pod="openstack-operators/openstack-operator-index-tqscj" Nov 24 17:16:30 crc kubenswrapper[4760]: I1124 17:16:30.289293 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-tqscj" Nov 24 17:16:30 crc kubenswrapper[4760]: I1124 17:16:30.776633 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-tqscj"] Nov 24 17:16:30 crc kubenswrapper[4760]: I1124 17:16:30.963280 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-tqscj" event={"ID":"8cf6e933-ef07-4998-9ce9-2470609096ba","Type":"ContainerStarted","Data":"b452cb9058d29a0921930d32f7e670044cbd4377f721874abd4a5def5dee2484"} Nov 24 17:16:31 crc kubenswrapper[4760]: I1124 17:16:31.216201 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:31 crc kubenswrapper[4760]: I1124 17:16:31.261605 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:33 crc kubenswrapper[4760]: I1124 17:16:33.984873 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-tqscj" event={"ID":"8cf6e933-ef07-4998-9ce9-2470609096ba","Type":"ContainerStarted","Data":"3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57"} Nov 24 17:16:34 crc kubenswrapper[4760]: I1124 17:16:34.003396 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-tqscj" podStartSLOduration=2.608338812 podStartE2EDuration="5.003367836s" podCreationTimestamp="2025-11-24 17:16:29 +0000 UTC" firstStartedPulling="2025-11-24 17:16:30.781402923 +0000 UTC m=+786.104284483" lastFinishedPulling="2025-11-24 17:16:33.176431957 +0000 UTC m=+788.499313507" observedRunningTime="2025-11-24 17:16:34.002403699 +0000 UTC m=+789.325285339" watchObservedRunningTime="2025-11-24 17:16:34.003367836 +0000 UTC m=+789.326249406" Nov 24 17:16:34 crc kubenswrapper[4760]: I1124 17:16:34.112185 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-tqscj"] Nov 24 17:16:34 crc kubenswrapper[4760]: I1124 17:16:34.924470 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-7jdkl"] Nov 24 17:16:34 crc kubenswrapper[4760]: I1124 17:16:34.925850 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-7jdkl" Nov 24 17:16:34 crc kubenswrapper[4760]: I1124 17:16:34.942584 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-7jdkl"] Nov 24 17:16:34 crc kubenswrapper[4760]: I1124 17:16:34.963607 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64dhn\" (UniqueName: \"kubernetes.io/projected/baed48fd-5a3a-482e-a24e-2aff550b63dc-kube-api-access-64dhn\") pod \"openstack-operator-index-7jdkl\" (UID: \"baed48fd-5a3a-482e-a24e-2aff550b63dc\") " pod="openstack-operators/openstack-operator-index-7jdkl" Nov 24 17:16:35 crc kubenswrapper[4760]: I1124 17:16:35.066497 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64dhn\" (UniqueName: \"kubernetes.io/projected/baed48fd-5a3a-482e-a24e-2aff550b63dc-kube-api-access-64dhn\") pod \"openstack-operator-index-7jdkl\" (UID: \"baed48fd-5a3a-482e-a24e-2aff550b63dc\") " pod="openstack-operators/openstack-operator-index-7jdkl" Nov 24 17:16:35 crc kubenswrapper[4760]: I1124 17:16:35.094555 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64dhn\" (UniqueName: \"kubernetes.io/projected/baed48fd-5a3a-482e-a24e-2aff550b63dc-kube-api-access-64dhn\") pod \"openstack-operator-index-7jdkl\" (UID: \"baed48fd-5a3a-482e-a24e-2aff550b63dc\") " pod="openstack-operators/openstack-operator-index-7jdkl" Nov 24 17:16:35 crc kubenswrapper[4760]: I1124 17:16:35.257497 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-7jdkl" Nov 24 17:16:35 crc kubenswrapper[4760]: I1124 17:16:35.601320 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-qk694" Nov 24 17:16:35 crc kubenswrapper[4760]: I1124 17:16:35.642440 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:16:35 crc kubenswrapper[4760]: I1124 17:16:35.642502 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:16:35 crc kubenswrapper[4760]: I1124 17:16:35.714830 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-7jdkl"] Nov 24 17:16:35 crc kubenswrapper[4760]: I1124 17:16:35.720808 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-r22lk" Nov 24 17:16:35 crc kubenswrapper[4760]: W1124 17:16:35.730244 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbaed48fd_5a3a_482e_a24e_2aff550b63dc.slice/crio-b1cbe361c4f85c2b400b686f6a5e250430f7d9620acf7df8d73dcb710ac7b8ae WatchSource:0}: Error finding container b1cbe361c4f85c2b400b686f6a5e250430f7d9620acf7df8d73dcb710ac7b8ae: Status 404 returned error can't find the container with id 
b1cbe361c4f85c2b400b686f6a5e250430f7d9620acf7df8d73dcb710ac7b8ae Nov 24 17:16:36 crc kubenswrapper[4760]: I1124 17:16:36.006675 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7jdkl" event={"ID":"baed48fd-5a3a-482e-a24e-2aff550b63dc","Type":"ContainerStarted","Data":"cff4db2fa6448ba5b1afb73d6f62cc1e74c3fa94f6b9585afa8dacec98cfe7c9"} Nov 24 17:16:36 crc kubenswrapper[4760]: I1124 17:16:36.006721 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7jdkl" event={"ID":"baed48fd-5a3a-482e-a24e-2aff550b63dc","Type":"ContainerStarted","Data":"b1cbe361c4f85c2b400b686f6a5e250430f7d9620acf7df8d73dcb710ac7b8ae"} Nov 24 17:16:36 crc kubenswrapper[4760]: I1124 17:16:36.007142 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-tqscj" podUID="8cf6e933-ef07-4998-9ce9-2470609096ba" containerName="registry-server" containerID="cri-o://3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57" gracePeriod=2 Nov 24 17:16:36 crc kubenswrapper[4760]: I1124 17:16:36.034328 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-7jdkl" podStartSLOduration=1.982042291 podStartE2EDuration="2.034298935s" podCreationTimestamp="2025-11-24 17:16:34 +0000 UTC" firstStartedPulling="2025-11-24 17:16:35.733980464 +0000 UTC m=+791.056862014" lastFinishedPulling="2025-11-24 17:16:35.786237108 +0000 UTC m=+791.109118658" observedRunningTime="2025-11-24 17:16:36.031326292 +0000 UTC m=+791.354207852" watchObservedRunningTime="2025-11-24 17:16:36.034298935 +0000 UTC m=+791.357180485" Nov 24 17:16:36 crc kubenswrapper[4760]: I1124 17:16:36.230416 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-j4nl8" Nov 24 17:16:36 crc kubenswrapper[4760]: I1124 17:16:36.383194 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-tqscj" Nov 24 17:16:36 crc kubenswrapper[4760]: I1124 17:16:36.488776 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ffb6\" (UniqueName: \"kubernetes.io/projected/8cf6e933-ef07-4998-9ce9-2470609096ba-kube-api-access-7ffb6\") pod \"8cf6e933-ef07-4998-9ce9-2470609096ba\" (UID: \"8cf6e933-ef07-4998-9ce9-2470609096ba\") " Nov 24 17:16:36 crc kubenswrapper[4760]: I1124 17:16:36.493852 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cf6e933-ef07-4998-9ce9-2470609096ba-kube-api-access-7ffb6" (OuterVolumeSpecName: "kube-api-access-7ffb6") pod "8cf6e933-ef07-4998-9ce9-2470609096ba" (UID: "8cf6e933-ef07-4998-9ce9-2470609096ba"). InnerVolumeSpecName "kube-api-access-7ffb6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:16:36 crc kubenswrapper[4760]: I1124 17:16:36.590594 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ffb6\" (UniqueName: \"kubernetes.io/projected/8cf6e933-ef07-4998-9ce9-2470609096ba-kube-api-access-7ffb6\") on node \"crc\" DevicePath \"\"" Nov 24 17:16:37 crc kubenswrapper[4760]: I1124 17:16:37.017081 4760 generic.go:334] "Generic (PLEG): container finished" podID="8cf6e933-ef07-4998-9ce9-2470609096ba" containerID="3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57" exitCode=0 Nov 24 17:16:37 crc kubenswrapper[4760]: I1124 17:16:37.017192 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-tqscj" event={"ID":"8cf6e933-ef07-4998-9ce9-2470609096ba","Type":"ContainerDied","Data":"3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57"} Nov 24 17:16:37 crc kubenswrapper[4760]: I1124 17:16:37.017210 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-tqscj" Nov 24 17:16:37 crc kubenswrapper[4760]: I1124 17:16:37.017631 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-tqscj" event={"ID":"8cf6e933-ef07-4998-9ce9-2470609096ba","Type":"ContainerDied","Data":"b452cb9058d29a0921930d32f7e670044cbd4377f721874abd4a5def5dee2484"} Nov 24 17:16:37 crc kubenswrapper[4760]: I1124 17:16:37.017675 4760 scope.go:117] "RemoveContainer" containerID="3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57" Nov 24 17:16:37 crc kubenswrapper[4760]: I1124 17:16:37.045594 4760 scope.go:117] "RemoveContainer" containerID="3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57" Nov 24 17:16:37 crc kubenswrapper[4760]: E1124 17:16:37.046832 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57\": container with ID starting with 3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57 not found: ID does not exist" containerID="3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57" Nov 24 17:16:37 crc kubenswrapper[4760]: I1124 17:16:37.046877 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57"} err="failed to get container status \"3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57\": rpc error: code = NotFound desc = could not find container \"3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57\": container with ID starting with 3a8c8d9860d743a1d093397f60ae8914fa9c3dc4c32fe37e1505db63b638fc57 not found: ID does not exist" Nov 24 17:16:37 crc kubenswrapper[4760]: I1124 17:16:37.069727 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-tqscj"] Nov 24 17:16:37 crc kubenswrapper[4760]: I1124 17:16:37.075921 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-tqscj"] Nov 24 17:16:37 crc kubenswrapper[4760]: I1124 17:16:37.479676 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cf6e933-ef07-4998-9ce9-2470609096ba" path="/var/lib/kubelet/pods/8cf6e933-ef07-4998-9ce9-2470609096ba/volumes" Nov 24 17:16:45 crc kubenswrapper[4760]: I1124 17:16:45.286448 4760 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-7jdkl" Nov 24 17:16:45 crc kubenswrapper[4760]: I1124 17:16:45.286859 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-7jdkl" Nov 24 17:16:45 crc kubenswrapper[4760]: I1124 17:16:45.325152 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-7jdkl" Nov 24 17:16:46 crc kubenswrapper[4760]: I1124 17:16:46.124464 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-7jdkl" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.802834 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p"] Nov 24 17:16:47 crc kubenswrapper[4760]: E1124 17:16:47.804237 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cf6e933-ef07-4998-9ce9-2470609096ba" containerName="registry-server" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.804271 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cf6e933-ef07-4998-9ce9-2470609096ba" containerName="registry-server" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.804542 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cf6e933-ef07-4998-9ce9-2470609096ba" containerName="registry-server" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.806523 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.809671 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-wpg8v" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.814903 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p"] Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.860883 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-bundle\") pod \"f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.860948 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-769lc\" (UniqueName: \"kubernetes.io/projected/a984675f-9d67-4699-a5c4-819cda440d13-kube-api-access-769lc\") pod \"f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.861043 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-util\") pod \"f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" 
Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.961894 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-util\") pod \"f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.962094 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-769lc\" (UniqueName: \"kubernetes.io/projected/a984675f-9d67-4699-a5c4-819cda440d13-kube-api-access-769lc\") pod \"f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.962138 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-bundle\") pod \"f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.962771 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-util\") pod \"f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.962910 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-bundle\") pod \"f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:47 crc kubenswrapper[4760]: I1124 17:16:47.993523 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-769lc\" (UniqueName: \"kubernetes.io/projected/a984675f-9d67-4699-a5c4-819cda440d13-kube-api-access-769lc\") pod \"f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:48 crc kubenswrapper[4760]: I1124 17:16:48.126912 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:48 crc kubenswrapper[4760]: I1124 17:16:48.604782 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p"] Nov 24 17:16:49 crc kubenswrapper[4760]: I1124 17:16:49.111721 4760 generic.go:334] "Generic (PLEG): container finished" podID="a984675f-9d67-4699-a5c4-819cda440d13" containerID="cfd999068f24aed92a1a869ef40439b86b09ee1e14542367826f34f87c33edb3" exitCode=0 Nov 24 17:16:49 crc kubenswrapper[4760]: I1124 17:16:49.111807 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" event={"ID":"a984675f-9d67-4699-a5c4-819cda440d13","Type":"ContainerDied","Data":"cfd999068f24aed92a1a869ef40439b86b09ee1e14542367826f34f87c33edb3"} Nov 24 17:16:49 crc kubenswrapper[4760]: I1124 17:16:49.112086 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" event={"ID":"a984675f-9d67-4699-a5c4-819cda440d13","Type":"ContainerStarted","Data":"24cdb205cd0ccad34b537ff0f19ef5fba8e6e75f819b11b6e94a4e20aa9295f3"} Nov 24 17:16:50 crc kubenswrapper[4760]: I1124 17:16:50.121604 4760 generic.go:334] "Generic (PLEG): container finished" podID="a984675f-9d67-4699-a5c4-819cda440d13" containerID="30d4221f59cc6ded764d13ac2d3527ee1fe84f5e06f4595a86de1d686e3dea29" exitCode=0 Nov 24 17:16:50 crc kubenswrapper[4760]: I1124 17:16:50.121734 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" event={"ID":"a984675f-9d67-4699-a5c4-819cda440d13","Type":"ContainerDied","Data":"30d4221f59cc6ded764d13ac2d3527ee1fe84f5e06f4595a86de1d686e3dea29"} Nov 24 17:16:51 crc kubenswrapper[4760]: I1124 17:16:51.134350 4760 generic.go:334] "Generic (PLEG): container finished" podID="a984675f-9d67-4699-a5c4-819cda440d13" containerID="b9d2730701f00bce006d384371b41dd2352b38aededf58ec864f02da5c52982e" exitCode=0 Nov 24 17:16:51 crc kubenswrapper[4760]: I1124 17:16:51.134482 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" event={"ID":"a984675f-9d67-4699-a5c4-819cda440d13","Type":"ContainerDied","Data":"b9d2730701f00bce006d384371b41dd2352b38aededf58ec864f02da5c52982e"} Nov 24 17:16:52 crc kubenswrapper[4760]: I1124 17:16:52.426461 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:52 crc kubenswrapper[4760]: I1124 17:16:52.541847 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-bundle\") pod \"a984675f-9d67-4699-a5c4-819cda440d13\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " Nov 24 17:16:52 crc kubenswrapper[4760]: I1124 17:16:52.541947 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-util\") pod \"a984675f-9d67-4699-a5c4-819cda440d13\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " Nov 24 17:16:52 crc kubenswrapper[4760]: I1124 17:16:52.542039 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-769lc\" (UniqueName: \"kubernetes.io/projected/a984675f-9d67-4699-a5c4-819cda440d13-kube-api-access-769lc\") pod \"a984675f-9d67-4699-a5c4-819cda440d13\" (UID: \"a984675f-9d67-4699-a5c4-819cda440d13\") " Nov 24 17:16:52 crc kubenswrapper[4760]: I1124 17:16:52.543529 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-bundle" (OuterVolumeSpecName: "bundle") pod "a984675f-9d67-4699-a5c4-819cda440d13" (UID: "a984675f-9d67-4699-a5c4-819cda440d13"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:16:52 crc kubenswrapper[4760]: I1124 17:16:52.544900 4760 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:16:52 crc kubenswrapper[4760]: I1124 17:16:52.554632 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a984675f-9d67-4699-a5c4-819cda440d13-kube-api-access-769lc" (OuterVolumeSpecName: "kube-api-access-769lc") pod "a984675f-9d67-4699-a5c4-819cda440d13" (UID: "a984675f-9d67-4699-a5c4-819cda440d13"). InnerVolumeSpecName "kube-api-access-769lc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:16:52 crc kubenswrapper[4760]: I1124 17:16:52.576668 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-util" (OuterVolumeSpecName: "util") pod "a984675f-9d67-4699-a5c4-819cda440d13" (UID: "a984675f-9d67-4699-a5c4-819cda440d13"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:16:52 crc kubenswrapper[4760]: I1124 17:16:52.645858 4760 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a984675f-9d67-4699-a5c4-819cda440d13-util\") on node \"crc\" DevicePath \"\"" Nov 24 17:16:52 crc kubenswrapper[4760]: I1124 17:16:52.645911 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-769lc\" (UniqueName: \"kubernetes.io/projected/a984675f-9d67-4699-a5c4-819cda440d13-kube-api-access-769lc\") on node \"crc\" DevicePath \"\"" Nov 24 17:16:53 crc kubenswrapper[4760]: I1124 17:16:53.153335 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" event={"ID":"a984675f-9d67-4699-a5c4-819cda440d13","Type":"ContainerDied","Data":"24cdb205cd0ccad34b537ff0f19ef5fba8e6e75f819b11b6e94a4e20aa9295f3"} Nov 24 17:16:53 crc kubenswrapper[4760]: I1124 17:16:53.153401 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p" Nov 24 17:16:53 crc kubenswrapper[4760]: I1124 17:16:53.153410 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24cdb205cd0ccad34b537ff0f19ef5fba8e6e75f819b11b6e94a4e20aa9295f3" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.478442 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb"] Nov 24 17:17:00 crc kubenswrapper[4760]: E1124 17:17:00.479337 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a984675f-9d67-4699-a5c4-819cda440d13" containerName="extract" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.479357 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a984675f-9d67-4699-a5c4-819cda440d13" containerName="extract" Nov 24 17:17:00 crc kubenswrapper[4760]: E1124 17:17:00.479380 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a984675f-9d67-4699-a5c4-819cda440d13" containerName="util" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.479392 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a984675f-9d67-4699-a5c4-819cda440d13" containerName="util" Nov 24 17:17:00 crc kubenswrapper[4760]: E1124 17:17:00.479412 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a984675f-9d67-4699-a5c4-819cda440d13" containerName="pull" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.479425 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a984675f-9d67-4699-a5c4-819cda440d13" containerName="pull" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.479620 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a984675f-9d67-4699-a5c4-819cda440d13" containerName="extract" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.480641 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.483541 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-gw8ms" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.524327 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb"] Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.563093 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s77xw\" (UniqueName: \"kubernetes.io/projected/20782ce3-a28a-4fa7-a4c1-ae186c4e9f44-kube-api-access-s77xw\") pod \"openstack-operator-controller-operator-d5ff89cd9-mtcgb\" (UID: \"20782ce3-a28a-4fa7-a4c1-ae186c4e9f44\") " pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.664193 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s77xw\" (UniqueName: \"kubernetes.io/projected/20782ce3-a28a-4fa7-a4c1-ae186c4e9f44-kube-api-access-s77xw\") pod \"openstack-operator-controller-operator-d5ff89cd9-mtcgb\" (UID: \"20782ce3-a28a-4fa7-a4c1-ae186c4e9f44\") " pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.687260 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s77xw\" (UniqueName: \"kubernetes.io/projected/20782ce3-a28a-4fa7-a4c1-ae186c4e9f44-kube-api-access-s77xw\") pod \"openstack-operator-controller-operator-d5ff89cd9-mtcgb\" (UID: \"20782ce3-a28a-4fa7-a4c1-ae186c4e9f44\") " pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" Nov 24 17:17:00 crc kubenswrapper[4760]: I1124 17:17:00.800599 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" Nov 24 17:17:01 crc kubenswrapper[4760]: I1124 17:17:01.276110 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb"] Nov 24 17:17:02 crc kubenswrapper[4760]: I1124 17:17:02.218861 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" event={"ID":"20782ce3-a28a-4fa7-a4c1-ae186c4e9f44","Type":"ContainerStarted","Data":"6d2ab5f78f9800053aea8a41a0020abbdd465216db2a69a42a3ad7546ea4b504"} Nov 24 17:17:05 crc kubenswrapper[4760]: I1124 17:17:05.642991 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:17:05 crc kubenswrapper[4760]: I1124 17:17:05.643686 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:17:05 crc kubenswrapper[4760]: I1124 17:17:05.643741 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:17:05 crc kubenswrapper[4760]: I1124 17:17:05.644458 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"82b000a4d02003c883bf71c824299533ea1c6d3009389b2511a2787ceedc0656"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:17:05 crc kubenswrapper[4760]: I1124 17:17:05.644546 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://82b000a4d02003c883bf71c824299533ea1c6d3009389b2511a2787ceedc0656" gracePeriod=600 Nov 24 17:17:06 crc kubenswrapper[4760]: I1124 17:17:06.258893 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="82b000a4d02003c883bf71c824299533ea1c6d3009389b2511a2787ceedc0656" exitCode=0 Nov 24 17:17:06 crc kubenswrapper[4760]: I1124 17:17:06.259042 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"82b000a4d02003c883bf71c824299533ea1c6d3009389b2511a2787ceedc0656"} Nov 24 17:17:06 crc kubenswrapper[4760]: I1124 17:17:06.259236 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"dfd774042184cc119075d4a563b6ff781e5839c1eacbc702f706225028bd27c8"} Nov 24 17:17:06 crc kubenswrapper[4760]: I1124 17:17:06.259265 4760 scope.go:117] "RemoveContainer" containerID="28aa4a21b3828caf19f86ef80042f17ff82b2d6bbe8b627e35198893af6325e3" Nov 24 
Nov 24 17:17:06 crc kubenswrapper[4760]: I1124 17:17:06.263440 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" event={"ID":"20782ce3-a28a-4fa7-a4c1-ae186c4e9f44","Type":"ContainerStarted","Data":"37ab2fc37fd1977ea1f4d3c33e797204ee2cc07252e0033ed41cb326a6e25d70"}
Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.674258 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vt2m4"]
Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.677617 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vt2m4"
Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.691519 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vt2m4"]
Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.759362 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28vn4\" (UniqueName: \"kubernetes.io/projected/dbd6fc70-3741-4740-ac2d-da6d1d127342-kube-api-access-28vn4\") pod \"community-operators-vt2m4\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " pod="openshift-marketplace/community-operators-vt2m4"
Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.759467 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-catalog-content\") pod \"community-operators-vt2m4\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " pod="openshift-marketplace/community-operators-vt2m4"
Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.759495 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-utilities\") pod \"community-operators-vt2m4\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " pod="openshift-marketplace/community-operators-vt2m4"
Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.861486 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28vn4\" (UniqueName: \"kubernetes.io/projected/dbd6fc70-3741-4740-ac2d-da6d1d127342-kube-api-access-28vn4\") pod \"community-operators-vt2m4\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " pod="openshift-marketplace/community-operators-vt2m4"
Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.861642 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-catalog-content\") pod \"community-operators-vt2m4\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " pod="openshift-marketplace/community-operators-vt2m4"
Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.861704 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-utilities\") pod \"community-operators-vt2m4\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " pod="openshift-marketplace/community-operators-vt2m4"
Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.862341 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-catalog-content\") pod \"community-operators-vt2m4\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " pod="openshift-marketplace/community-operators-vt2m4" Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.862500 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-utilities\") pod \"community-operators-vt2m4\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " pod="openshift-marketplace/community-operators-vt2m4" Nov 24 17:17:07 crc kubenswrapper[4760]: I1124 17:17:07.892152 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28vn4\" (UniqueName: \"kubernetes.io/projected/dbd6fc70-3741-4740-ac2d-da6d1d127342-kube-api-access-28vn4\") pod \"community-operators-vt2m4\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " pod="openshift-marketplace/community-operators-vt2m4" Nov 24 17:17:08 crc kubenswrapper[4760]: I1124 17:17:08.025407 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vt2m4" Nov 24 17:17:08 crc kubenswrapper[4760]: I1124 17:17:08.280861 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" event={"ID":"20782ce3-a28a-4fa7-a4c1-ae186c4e9f44","Type":"ContainerStarted","Data":"89b4f8901c62e57ee252c2edd242e9ec90236a690c96de74cf03c767e6d003dd"} Nov 24 17:17:08 crc kubenswrapper[4760]: I1124 17:17:08.281583 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" Nov 24 17:17:08 crc kubenswrapper[4760]: I1124 17:17:08.457443 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" podStartSLOduration=2.154022918 podStartE2EDuration="8.45742505s" podCreationTimestamp="2025-11-24 17:17:00 +0000 UTC" firstStartedPulling="2025-11-24 17:17:01.285052482 +0000 UTC m=+816.607934032" lastFinishedPulling="2025-11-24 17:17:07.588454604 +0000 UTC m=+822.911336164" observedRunningTime="2025-11-24 17:17:08.314771875 +0000 UTC m=+823.637653435" watchObservedRunningTime="2025-11-24 17:17:08.45742505 +0000 UTC m=+823.780306600" Nov 24 17:17:08 crc kubenswrapper[4760]: I1124 17:17:08.467520 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vt2m4"] Nov 24 17:17:09 crc kubenswrapper[4760]: I1124 17:17:09.291659 4760 generic.go:334] "Generic (PLEG): container finished" podID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerID="1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c" exitCode=0 Nov 24 17:17:09 crc kubenswrapper[4760]: I1124 17:17:09.293114 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt2m4" event={"ID":"dbd6fc70-3741-4740-ac2d-da6d1d127342","Type":"ContainerDied","Data":"1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c"} Nov 24 17:17:09 crc kubenswrapper[4760]: I1124 17:17:09.293161 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt2m4" event={"ID":"dbd6fc70-3741-4740-ac2d-da6d1d127342","Type":"ContainerStarted","Data":"a7db136b35e4c06c53f9c4952a79c15fe649c32bc9c276ab2e24284b46703828"} Nov 24 17:17:10 crc kubenswrapper[4760]: I1124 17:17:10.303608 4760 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-d5ff89cd9-mtcgb" Nov 24 17:17:11 crc kubenswrapper[4760]: I1124 17:17:11.308976 4760 generic.go:334] "Generic (PLEG): container finished" podID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerID="ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4" exitCode=0 Nov 24 17:17:11 crc kubenswrapper[4760]: I1124 17:17:11.309058 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt2m4" event={"ID":"dbd6fc70-3741-4740-ac2d-da6d1d127342","Type":"ContainerDied","Data":"ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4"} Nov 24 17:17:12 crc kubenswrapper[4760]: I1124 17:17:12.316798 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt2m4" event={"ID":"dbd6fc70-3741-4740-ac2d-da6d1d127342","Type":"ContainerStarted","Data":"0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c"} Nov 24 17:17:15 crc kubenswrapper[4760]: I1124 17:17:15.868353 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vt2m4" podStartSLOduration=6.4692911 podStartE2EDuration="8.868326278s" podCreationTimestamp="2025-11-24 17:17:07 +0000 UTC" firstStartedPulling="2025-11-24 17:17:09.294961045 +0000 UTC m=+824.617842635" lastFinishedPulling="2025-11-24 17:17:11.693996253 +0000 UTC m=+827.016877813" observedRunningTime="2025-11-24 17:17:12.334607923 +0000 UTC m=+827.657489473" watchObservedRunningTime="2025-11-24 17:17:15.868326278 +0000 UTC m=+831.191207828" Nov 24 17:17:15 crc kubenswrapper[4760]: I1124 17:17:15.869498 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-88vbt"] Nov 24 17:17:15 crc kubenswrapper[4760]: I1124 17:17:15.870599 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:15 crc kubenswrapper[4760]: I1124 17:17:15.955630 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-88vbt"] Nov 24 17:17:15 crc kubenswrapper[4760]: I1124 17:17:15.977033 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-catalog-content\") pod \"certified-operators-88vbt\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:15 crc kubenswrapper[4760]: I1124 17:17:15.977108 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-utilities\") pod \"certified-operators-88vbt\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:15 crc kubenswrapper[4760]: I1124 17:17:15.977134 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9d2j\" (UniqueName: \"kubernetes.io/projected/d7fb680d-a3bc-4cba-8231-d31eeef8e418-kube-api-access-t9d2j\") pod \"certified-operators-88vbt\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:16 crc kubenswrapper[4760]: I1124 17:17:16.078816 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-catalog-content\") pod \"certified-operators-88vbt\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:16 crc kubenswrapper[4760]: I1124 17:17:16.078906 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-utilities\") pod \"certified-operators-88vbt\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:16 crc kubenswrapper[4760]: I1124 17:17:16.078929 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9d2j\" (UniqueName: \"kubernetes.io/projected/d7fb680d-a3bc-4cba-8231-d31eeef8e418-kube-api-access-t9d2j\") pod \"certified-operators-88vbt\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:16 crc kubenswrapper[4760]: I1124 17:17:16.079378 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-utilities\") pod \"certified-operators-88vbt\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:16 crc kubenswrapper[4760]: I1124 17:17:16.079378 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-catalog-content\") pod \"certified-operators-88vbt\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:16 crc kubenswrapper[4760]: I1124 17:17:16.107084 4760 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-t9d2j\" (UniqueName: \"kubernetes.io/projected/d7fb680d-a3bc-4cba-8231-d31eeef8e418-kube-api-access-t9d2j\") pod \"certified-operators-88vbt\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:16 crc kubenswrapper[4760]: I1124 17:17:16.190356 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:16 crc kubenswrapper[4760]: I1124 17:17:16.705219 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-88vbt"] Nov 24 17:17:17 crc kubenswrapper[4760]: I1124 17:17:17.354810 4760 generic.go:334] "Generic (PLEG): container finished" podID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerID="762b521261dc91617f4c13370a33345843eee40c5f215507572cb54447e87bed" exitCode=0 Nov 24 17:17:17 crc kubenswrapper[4760]: I1124 17:17:17.354905 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-88vbt" event={"ID":"d7fb680d-a3bc-4cba-8231-d31eeef8e418","Type":"ContainerDied","Data":"762b521261dc91617f4c13370a33345843eee40c5f215507572cb54447e87bed"} Nov 24 17:17:17 crc kubenswrapper[4760]: I1124 17:17:17.355201 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-88vbt" event={"ID":"d7fb680d-a3bc-4cba-8231-d31eeef8e418","Type":"ContainerStarted","Data":"b8dc44359a3e9e10f85b88accf0c2e6b1d094ff3d3fa1c02c705472db7072afe"} Nov 24 17:17:18 crc kubenswrapper[4760]: I1124 17:17:18.025675 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vt2m4" Nov 24 17:17:18 crc kubenswrapper[4760]: I1124 17:17:18.025715 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vt2m4" Nov 24 17:17:18 crc kubenswrapper[4760]: I1124 17:17:18.089595 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vt2m4" Nov 24 17:17:18 crc kubenswrapper[4760]: I1124 17:17:18.364288 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-88vbt" event={"ID":"d7fb680d-a3bc-4cba-8231-d31eeef8e418","Type":"ContainerStarted","Data":"028d740eed8c82f9280251d3a0879a0e71e53ae0068cda3e5900ebc2fdf186f1"} Nov 24 17:17:18 crc kubenswrapper[4760]: I1124 17:17:18.406459 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vt2m4" Nov 24 17:17:19 crc kubenswrapper[4760]: I1124 17:17:19.375404 4760 generic.go:334] "Generic (PLEG): container finished" podID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerID="028d740eed8c82f9280251d3a0879a0e71e53ae0068cda3e5900ebc2fdf186f1" exitCode=0 Nov 24 17:17:19 crc kubenswrapper[4760]: I1124 17:17:19.375504 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-88vbt" event={"ID":"d7fb680d-a3bc-4cba-8231-d31eeef8e418","Type":"ContainerDied","Data":"028d740eed8c82f9280251d3a0879a0e71e53ae0068cda3e5900ebc2fdf186f1"} Nov 24 17:17:20 crc kubenswrapper[4760]: I1124 17:17:20.385723 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-88vbt" 
event={"ID":"d7fb680d-a3bc-4cba-8231-d31eeef8e418","Type":"ContainerStarted","Data":"75763cba45bf8a2028a8bb1a18bd04faa5b663fc64c104789d41ae8768f7f345"} Nov 24 17:17:20 crc kubenswrapper[4760]: I1124 17:17:20.442354 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-88vbt" podStartSLOduration=3.024753281 podStartE2EDuration="5.442333197s" podCreationTimestamp="2025-11-24 17:17:15 +0000 UTC" firstStartedPulling="2025-11-24 17:17:17.356868716 +0000 UTC m=+832.679750266" lastFinishedPulling="2025-11-24 17:17:19.774448622 +0000 UTC m=+835.097330182" observedRunningTime="2025-11-24 17:17:20.435595868 +0000 UTC m=+835.758477438" watchObservedRunningTime="2025-11-24 17:17:20.442333197 +0000 UTC m=+835.765214767" Nov 24 17:17:21 crc kubenswrapper[4760]: I1124 17:17:21.666995 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vt2m4"] Nov 24 17:17:21 crc kubenswrapper[4760]: I1124 17:17:21.667504 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vt2m4" podUID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerName="registry-server" containerID="cri-o://0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c" gracePeriod=2 Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.094501 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vt2m4" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.163989 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28vn4\" (UniqueName: \"kubernetes.io/projected/dbd6fc70-3741-4740-ac2d-da6d1d127342-kube-api-access-28vn4\") pod \"dbd6fc70-3741-4740-ac2d-da6d1d127342\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.164080 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-catalog-content\") pod \"dbd6fc70-3741-4740-ac2d-da6d1d127342\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.164179 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-utilities\") pod \"dbd6fc70-3741-4740-ac2d-da6d1d127342\" (UID: \"dbd6fc70-3741-4740-ac2d-da6d1d127342\") " Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.165124 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-utilities" (OuterVolumeSpecName: "utilities") pod "dbd6fc70-3741-4740-ac2d-da6d1d127342" (UID: "dbd6fc70-3741-4740-ac2d-da6d1d127342"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.172765 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbd6fc70-3741-4740-ac2d-da6d1d127342-kube-api-access-28vn4" (OuterVolumeSpecName: "kube-api-access-28vn4") pod "dbd6fc70-3741-4740-ac2d-da6d1d127342" (UID: "dbd6fc70-3741-4740-ac2d-da6d1d127342"). InnerVolumeSpecName "kube-api-access-28vn4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.208578 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dbd6fc70-3741-4740-ac2d-da6d1d127342" (UID: "dbd6fc70-3741-4740-ac2d-da6d1d127342"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.264983 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.265032 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28vn4\" (UniqueName: \"kubernetes.io/projected/dbd6fc70-3741-4740-ac2d-da6d1d127342-kube-api-access-28vn4\") on node \"crc\" DevicePath \"\"" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.265043 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbd6fc70-3741-4740-ac2d-da6d1d127342-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.402647 4760 generic.go:334] "Generic (PLEG): container finished" podID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerID="0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c" exitCode=0 Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.402688 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt2m4" event={"ID":"dbd6fc70-3741-4740-ac2d-da6d1d127342","Type":"ContainerDied","Data":"0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c"} Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.402714 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vt2m4" event={"ID":"dbd6fc70-3741-4740-ac2d-da6d1d127342","Type":"ContainerDied","Data":"a7db136b35e4c06c53f9c4952a79c15fe649c32bc9c276ab2e24284b46703828"} Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.402732 4760 scope.go:117] "RemoveContainer" containerID="0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.402832 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vt2m4" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.433249 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vt2m4"] Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.438867 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vt2m4"] Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.441073 4760 scope.go:117] "RemoveContainer" containerID="ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.469167 4760 scope.go:117] "RemoveContainer" containerID="1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.502495 4760 scope.go:117] "RemoveContainer" containerID="0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c" Nov 24 17:17:22 crc kubenswrapper[4760]: E1124 17:17:22.503122 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c\": container with ID starting with 0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c not found: ID does not exist" containerID="0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.503178 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c"} err="failed to get container status \"0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c\": rpc error: code = NotFound desc = could not find container \"0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c\": container with ID starting with 0be65602edcd30d4917387c15bfe6db5a77d5de95949e4c988b57389b341912c not found: ID does not exist" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.503211 4760 scope.go:117] "RemoveContainer" containerID="ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4" Nov 24 17:17:22 crc kubenswrapper[4760]: E1124 17:17:22.503806 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4\": container with ID starting with ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4 not found: ID does not exist" containerID="ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.503855 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4"} err="failed to get container status \"ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4\": rpc error: code = NotFound desc = could not find container \"ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4\": container with ID starting with ff5c80b151e21ea9543782ac989775508569f937ebdec42ccaaf29bbfccc50a4 not found: ID does not exist" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.503874 4760 scope.go:117] "RemoveContainer" containerID="1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c" Nov 24 17:17:22 crc kubenswrapper[4760]: E1124 17:17:22.504281 4760 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c\": container with ID starting with 1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c not found: ID does not exist" containerID="1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c" Nov 24 17:17:22 crc kubenswrapper[4760]: I1124 17:17:22.504348 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c"} err="failed to get container status \"1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c\": rpc error: code = NotFound desc = could not find container \"1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c\": container with ID starting with 1a27cc64653fa928b3db24aab77eb9ec7ab9310b5458a57c97f53be5034ed91c not found: ID does not exist" Nov 24 17:17:23 crc kubenswrapper[4760]: I1124 17:17:23.475278 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbd6fc70-3741-4740-ac2d-da6d1d127342" path="/var/lib/kubelet/pods/dbd6fc70-3741-4740-ac2d-da6d1d127342/volumes" Nov 24 17:17:26 crc kubenswrapper[4760]: I1124 17:17:26.191511 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:26 crc kubenswrapper[4760]: I1124 17:17:26.191871 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:26 crc kubenswrapper[4760]: I1124 17:17:26.269765 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:26 crc kubenswrapper[4760]: I1124 17:17:26.527519 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.567512 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw"] Nov 24 17:17:27 crc kubenswrapper[4760]: E1124 17:17:27.568154 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerName="extract-content" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.568171 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerName="extract-content" Nov 24 17:17:27 crc kubenswrapper[4760]: E1124 17:17:27.568189 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerName="registry-server" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.568197 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerName="registry-server" Nov 24 17:17:27 crc kubenswrapper[4760]: E1124 17:17:27.568217 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerName="extract-utilities" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.568227 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerName="extract-utilities" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.568353 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbd6fc70-3741-4740-ac2d-da6d1d127342" containerName="registry-server" Nov 24 
17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.569087 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.580806 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-r7k7t" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.587913 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.589132 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.591703 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.596159 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.598032 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-dzxl7" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.657333 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.659262 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftnvf\" (UniqueName: \"kubernetes.io/projected/c07ab946-dbd4-4fbf-b17c-7bfa133e1c96-kube-api-access-ftnvf\") pod \"barbican-operator-controller-manager-75fb479bcc-f97qw\" (UID: \"c07ab946-dbd4-4fbf-b17c-7bfa133e1c96\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.659345 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khlq6\" (UniqueName: \"kubernetes.io/projected/8a48d8a2-3c00-4a6e-b88f-dab093355874-kube-api-access-khlq6\") pod \"cinder-operator-controller-manager-6498cbf48f-9fscr\" (UID: \"8a48d8a2-3c00-4a6e-b88f-dab093355874\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.662612 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.680607 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-jkgzd" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.716180 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.752173 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-znhd6"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.753237 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.757575 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-w968t" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.758100 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-znhd6"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.763039 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftnvf\" (UniqueName: \"kubernetes.io/projected/c07ab946-dbd4-4fbf-b17c-7bfa133e1c96-kube-api-access-ftnvf\") pod \"barbican-operator-controller-manager-75fb479bcc-f97qw\" (UID: \"c07ab946-dbd4-4fbf-b17c-7bfa133e1c96\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.763084 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9m5z\" (UniqueName: \"kubernetes.io/projected/df86f3d1-75ea-4757-8115-1440d92160b6-kube-api-access-c9m5z\") pod \"designate-operator-controller-manager-767ccfd65f-6tfrh\" (UID: \"df86f3d1-75ea-4757-8115-1440d92160b6\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.763150 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khlq6\" (UniqueName: \"kubernetes.io/projected/8a48d8a2-3c00-4a6e-b88f-dab093355874-kube-api-access-khlq6\") pod \"cinder-operator-controller-manager-6498cbf48f-9fscr\" (UID: \"8a48d8a2-3c00-4a6e-b88f-dab093355874\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.770136 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.771162 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.773660 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.775567 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.776329 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-5kwgl" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.779595 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-mqvtj" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.802080 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khlq6\" (UniqueName: \"kubernetes.io/projected/8a48d8a2-3c00-4a6e-b88f-dab093355874-kube-api-access-khlq6\") pod \"cinder-operator-controller-manager-6498cbf48f-9fscr\" (UID: \"8a48d8a2-3c00-4a6e-b88f-dab093355874\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.802231 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftnvf\" (UniqueName: \"kubernetes.io/projected/c07ab946-dbd4-4fbf-b17c-7bfa133e1c96-kube-api-access-ftnvf\") pod \"barbican-operator-controller-manager-75fb479bcc-f97qw\" (UID: \"c07ab946-dbd4-4fbf-b17c-7bfa133e1c96\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.804952 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.806025 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.814771 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.814950 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-b98xg" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.826212 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.838610 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.842741 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.843812 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.848170 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.848388 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-dbzz5" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.856242 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.865888 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9m5z\" (UniqueName: \"kubernetes.io/projected/df86f3d1-75ea-4757-8115-1440d92160b6-kube-api-access-c9m5z\") pod \"designate-operator-controller-manager-767ccfd65f-6tfrh\" (UID: \"df86f3d1-75ea-4757-8115-1440d92160b6\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.865940 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtdf4\" (UniqueName: \"kubernetes.io/projected/e3c878c9-0549-4e8b-bb1a-2754b8a8d402-kube-api-access-dtdf4\") pod \"glance-operator-controller-manager-7969689c84-znhd6\" (UID: \"e3c878c9-0549-4e8b-bb1a-2754b8a8d402\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.865966 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/11f165ab-07bd-46ce-ad35-5b349c9b16be-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-jxvzz\" (UID: \"11f165ab-07bd-46ce-ad35-5b349c9b16be\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.865989 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58kp5\" (UniqueName: \"kubernetes.io/projected/11f165ab-07bd-46ce-ad35-5b349c9b16be-kube-api-access-58kp5\") pod \"infra-operator-controller-manager-6dd8864d7c-jxvzz\" (UID: \"11f165ab-07bd-46ce-ad35-5b349c9b16be\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.866044 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqphc\" (UniqueName: \"kubernetes.io/projected/abd30b3d-1e1d-4a1d-b4b6-aaf500949015-kube-api-access-gqphc\") pod \"heat-operator-controller-manager-56f54d6746-dm4k7\" (UID: \"abd30b3d-1e1d-4a1d-b4b6-aaf500949015\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.866071 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6z87f\" (UniqueName: \"kubernetes.io/projected/981e3771-3dd1-4e3d-9601-7c16bbc22c8f-kube-api-access-6z87f\") pod \"horizon-operator-controller-manager-598f69df5d-95w5b\" (UID: \"981e3771-3dd1-4e3d-9601-7c16bbc22c8f\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" Nov 24 17:17:27 crc kubenswrapper[4760]: 
I1124 17:17:27.876074 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.877069 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.882197 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-ch9qz" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.889325 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.898335 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.905205 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-87pfs"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.906205 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.916213 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9m5z\" (UniqueName: \"kubernetes.io/projected/df86f3d1-75ea-4757-8115-1440d92160b6-kube-api-access-c9m5z\") pod \"designate-operator-controller-manager-767ccfd65f-6tfrh\" (UID: \"df86f3d1-75ea-4757-8115-1440d92160b6\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.925703 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-87pfs"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.938782 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-z4m97" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.939409 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.940362 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.941536 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-jdclb" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.944140 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.945073 4760 util.go:30] "No sandbox for pod can be found. 
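
Every pod in this batch carries a kube-api-access-* volume (kube-api-access-ftnvf, kube-api-access-khlq6, and so on): the projected service-account token volume injected into pod specs, which is why each operator pod shows the same VerifyControllerAttachedVolume / MountVolume.SetUp pair. A sketch of the typical shape of such a volume using the core/v1 types; the expiry and the kube-root-ca.crt projection are the well-known defaults rather than values read from this log, and the downward-API namespace projection is omitted for brevity:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        expiry := int64(3607) // usual default; not visible in the log
        vol := corev1.Volume{
            Name: "kube-api-access-ftnvf",
            VolumeSource: corev1.VolumeSource{
                Projected: &corev1.ProjectedVolumeSource{
                    Sources: []corev1.VolumeProjection{
                        {ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
                            Path:              "token",
                            ExpirationSeconds: &expiry,
                        }},
                        {ConfigMap: &corev1.ConfigMapProjection{
                            LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
                            Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
                        }},
                    },
                },
            },
        }
        fmt.Println(vol.Name)
    }
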
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.953145 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-ccr9k" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.963433 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.963820 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z"] Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.974884 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grbgf\" (UniqueName: \"kubernetes.io/projected/93232e72-070f-4a46-89da-983cd8abe0b5-kube-api-access-grbgf\") pod \"ironic-operator-controller-manager-99b499f4-zd54m\" (UID: \"93232e72-070f-4a46-89da-983cd8abe0b5\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.974925 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqphc\" (UniqueName: \"kubernetes.io/projected/abd30b3d-1e1d-4a1d-b4b6-aaf500949015-kube-api-access-gqphc\") pod \"heat-operator-controller-manager-56f54d6746-dm4k7\" (UID: \"abd30b3d-1e1d-4a1d-b4b6-aaf500949015\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.974947 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jtq8\" (UniqueName: \"kubernetes.io/projected/d7eea786-ecee-41f0-9a52-7ac9bef2f874-kube-api-access-8jtq8\") pod \"manila-operator-controller-manager-58f887965d-87pfs\" (UID: \"d7eea786-ecee-41f0-9a52-7ac9bef2f874\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.974978 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6z87f\" (UniqueName: \"kubernetes.io/projected/981e3771-3dd1-4e3d-9601-7c16bbc22c8f-kube-api-access-6z87f\") pod \"horizon-operator-controller-manager-598f69df5d-95w5b\" (UID: \"981e3771-3dd1-4e3d-9601-7c16bbc22c8f\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.975018 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trkfm\" (UniqueName: \"kubernetes.io/projected/c0da29f6-094e-499d-90ea-93ddfe52e165-kube-api-access-trkfm\") pod \"keystone-operator-controller-manager-7454b96578-wgd79\" (UID: \"c0da29f6-094e-499d-90ea-93ddfe52e165\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.975050 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtdf4\" (UniqueName: \"kubernetes.io/projected/e3c878c9-0549-4e8b-bb1a-2754b8a8d402-kube-api-access-dtdf4\") pod \"glance-operator-controller-manager-7969689c84-znhd6\" (UID: \"e3c878c9-0549-4e8b-bb1a-2754b8a8d402\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" Nov 24 17:17:27 crc 
kubenswrapper[4760]: I1124 17:17:27.975076 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/11f165ab-07bd-46ce-ad35-5b349c9b16be-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-jxvzz\" (UID: \"11f165ab-07bd-46ce-ad35-5b349c9b16be\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" Nov 24 17:17:27 crc kubenswrapper[4760]: I1124 17:17:27.975097 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58kp5\" (UniqueName: \"kubernetes.io/projected/11f165ab-07bd-46ce-ad35-5b349c9b16be-kube-api-access-58kp5\") pod \"infra-operator-controller-manager-6dd8864d7c-jxvzz\" (UID: \"11f165ab-07bd-46ce-ad35-5b349c9b16be\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" Nov 24 17:17:27 crc kubenswrapper[4760]: E1124 17:17:27.975615 4760 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 24 17:17:27 crc kubenswrapper[4760]: E1124 17:17:27.975651 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/11f165ab-07bd-46ce-ad35-5b349c9b16be-cert podName:11f165ab-07bd-46ce-ad35-5b349c9b16be nodeName:}" failed. No retries permitted until 2025-11-24 17:17:28.475637303 +0000 UTC m=+843.798518853 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/11f165ab-07bd-46ce-ad35-5b349c9b16be-cert") pod "infra-operator-controller-manager-6dd8864d7c-jxvzz" (UID: "11f165ab-07bd-46ce-ad35-5b349c9b16be") : secret "infra-operator-webhook-server-cert" not found Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.008074 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l"] Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.018792 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6z87f\" (UniqueName: \"kubernetes.io/projected/981e3771-3dd1-4e3d-9601-7c16bbc22c8f-kube-api-access-6z87f\") pod \"horizon-operator-controller-manager-598f69df5d-95w5b\" (UID: \"981e3771-3dd1-4e3d-9601-7c16bbc22c8f\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.019113 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv"] Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.020121 4760 util.go:30] "No sandbox for pod can be found. 
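
The failed cert mount above is transient by design: the infra-operator-webhook-server-cert secret does not exist yet when the pod lands on the node, so MountVolume.SetUp fails and nestedpendingoperations schedules the next attempt 500ms out ("No retries permitted until ..."); repeated failures back off until the secret appears. A sketch of that retry pattern; the 500ms comes from the entry, while the doubling and the cap are assumptions about the general shape, not the kubelet's exact constants:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        backoff := 500 * time.Millisecond  // durationBeforeRetry in the entry above
        const maxBackoff = 2 * time.Minute // illustrative cap
        for attempt := 1; attempt <= 5; attempt++ {
            fmt.Printf("attempt %d: no retries permitted for %v\n", attempt, backoff)
            backoff *= 2
            if backoff > maxBackoff {
                backoff = maxBackoff
            }
        }
    }
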
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.020821 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqphc\" (UniqueName: \"kubernetes.io/projected/abd30b3d-1e1d-4a1d-b4b6-aaf500949015-kube-api-access-gqphc\") pod \"heat-operator-controller-manager-56f54d6746-dm4k7\" (UID: \"abd30b3d-1e1d-4a1d-b4b6-aaf500949015\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.021444 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58kp5\" (UniqueName: \"kubernetes.io/projected/11f165ab-07bd-46ce-ad35-5b349c9b16be-kube-api-access-58kp5\") pod \"infra-operator-controller-manager-6dd8864d7c-jxvzz\" (UID: \"11f165ab-07bd-46ce-ad35-5b349c9b16be\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.025389 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-4n295" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.030631 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtdf4\" (UniqueName: \"kubernetes.io/projected/e3c878c9-0549-4e8b-bb1a-2754b8a8d402-kube-api-access-dtdf4\") pod \"glance-operator-controller-manager-7969689c84-znhd6\" (UID: \"e3c878c9-0549-4e8b-bb1a-2754b8a8d402\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.030926 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.051068 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv"] Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.075883 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mdp2\" (UniqueName: \"kubernetes.io/projected/dc1dfda1-793b-4b06-a228-0e5472915f76-kube-api-access-7mdp2\") pod \"neutron-operator-controller-manager-78bd47f458-j6f4z\" (UID: \"dc1dfda1-793b-4b06-a228-0e5472915f76\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.075931 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhdmq\" (UniqueName: \"kubernetes.io/projected/51bd5ae4-002b-40c4-bd9e-b6d087bfdaba-kube-api-access-lhdmq\") pod \"nova-operator-controller-manager-cfbb9c588-r7dzv\" (UID: \"51bd5ae4-002b-40c4-bd9e-b6d087bfdaba\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.076016 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grbgf\" (UniqueName: \"kubernetes.io/projected/93232e72-070f-4a46-89da-983cd8abe0b5-kube-api-access-grbgf\") pod \"ironic-operator-controller-manager-99b499f4-zd54m\" (UID: \"93232e72-070f-4a46-89da-983cd8abe0b5\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.076043 4760 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jtq8\" (UniqueName: \"kubernetes.io/projected/d7eea786-ecee-41f0-9a52-7ac9bef2f874-kube-api-access-8jtq8\") pod \"manila-operator-controller-manager-58f887965d-87pfs\" (UID: \"d7eea786-ecee-41f0-9a52-7ac9bef2f874\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.076094 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trkfm\" (UniqueName: \"kubernetes.io/projected/c0da29f6-094e-499d-90ea-93ddfe52e165-kube-api-access-trkfm\") pod \"keystone-operator-controller-manager-7454b96578-wgd79\" (UID: \"c0da29f6-094e-499d-90ea-93ddfe52e165\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.076112 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcp8f\" (UniqueName: \"kubernetes.io/projected/a29a1da0-a007-4d2d-8ca2-0a3f78e4d995-kube-api-access-rcp8f\") pod \"mariadb-operator-controller-manager-54b5986bb8-fl68l\" (UID: \"a29a1da0-a007-4d2d-8ca2-0a3f78e4d995\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.094881 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz"] Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.095897 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.096324 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.103278 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm"] Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.104277 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.104865 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-flc8m" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.105461 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.107954 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-htwbv" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.110260 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.119625 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jtq8\" (UniqueName: \"kubernetes.io/projected/d7eea786-ecee-41f0-9a52-7ac9bef2f874-kube-api-access-8jtq8\") pod \"manila-operator-controller-manager-58f887965d-87pfs\" (UID: \"d7eea786-ecee-41f0-9a52-7ac9bef2f874\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.131588 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trkfm\" (UniqueName: \"kubernetes.io/projected/c0da29f6-094e-499d-90ea-93ddfe52e165-kube-api-access-trkfm\") pod \"keystone-operator-controller-manager-7454b96578-wgd79\" (UID: \"c0da29f6-094e-499d-90ea-93ddfe52e165\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.132313 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.154653 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grbgf\" (UniqueName: \"kubernetes.io/projected/93232e72-070f-4a46-89da-983cd8abe0b5-kube-api-access-grbgf\") pod \"ironic-operator-controller-manager-99b499f4-zd54m\" (UID: \"93232e72-070f-4a46-89da-983cd8abe0b5\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.157178 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.173089 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz"] Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.184799 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdhr4\" (UniqueName: \"kubernetes.io/projected/2ebc4c96-b0e9-4f9f-950b-5af42b867a8a-kube-api-access-rdhr4\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm\" (UID: \"2ebc4c96-b0e9-4f9f-950b-5af42b867a8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.184844 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcp8f\" (UniqueName: \"kubernetes.io/projected/a29a1da0-a007-4d2d-8ca2-0a3f78e4d995-kube-api-access-rcp8f\") pod \"mariadb-operator-controller-manager-54b5986bb8-fl68l\" (UID: \"a29a1da0-a007-4d2d-8ca2-0a3f78e4d995\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.184865 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdgpb\" (UniqueName: \"kubernetes.io/projected/f43007f0-7615-44a1-8594-dd0b0adbded6-kube-api-access-hdgpb\") pod \"octavia-operator-controller-manager-54cfbf4c7d-hn7wz\" (UID: \"f43007f0-7615-44a1-8594-dd0b0adbded6\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.184894 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mdp2\" (UniqueName: \"kubernetes.io/projected/dc1dfda1-793b-4b06-a228-0e5472915f76-kube-api-access-7mdp2\") pod \"neutron-operator-controller-manager-78bd47f458-j6f4z\" (UID: \"dc1dfda1-793b-4b06-a228-0e5472915f76\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.184913 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2ebc4c96-b0e9-4f9f-950b-5af42b867a8a-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm\" (UID: \"2ebc4c96-b0e9-4f9f-950b-5af42b867a8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.184931 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhdmq\" (UniqueName: \"kubernetes.io/projected/51bd5ae4-002b-40c4-bd9e-b6d087bfdaba-kube-api-access-lhdmq\") pod \"nova-operator-controller-manager-cfbb9c588-r7dzv\" (UID: \"51bd5ae4-002b-40c4-bd9e-b6d087bfdaba\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.192173 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm"] Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.224283 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhdmq\" (UniqueName: 
\"kubernetes.io/projected/51bd5ae4-002b-40c4-bd9e-b6d087bfdaba-kube-api-access-lhdmq\") pod \"nova-operator-controller-manager-cfbb9c588-r7dzv\" (UID: \"51bd5ae4-002b-40c4-bd9e-b6d087bfdaba\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.225059 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcp8f\" (UniqueName: \"kubernetes.io/projected/a29a1da0-a007-4d2d-8ca2-0a3f78e4d995-kube-api-access-rcp8f\") pod \"mariadb-operator-controller-manager-54b5986bb8-fl68l\" (UID: \"a29a1da0-a007-4d2d-8ca2-0a3f78e4d995\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.230095 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mdp2\" (UniqueName: \"kubernetes.io/projected/dc1dfda1-793b-4b06-a228-0e5472915f76-kube-api-access-7mdp2\") pod \"neutron-operator-controller-manager-78bd47f458-j6f4z\" (UID: \"dc1dfda1-793b-4b06-a228-0e5472915f76\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.261515 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh"] Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.266694 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs"] Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.270803 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh" Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.272596 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.275824 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-4qg47"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.276029 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-dr8fb"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.286591 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdhr4\" (UniqueName: \"kubernetes.io/projected/2ebc4c96-b0e9-4f9f-950b-5af42b867a8a-kube-api-access-rdhr4\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm\" (UID: \"2ebc4c96-b0e9-4f9f-950b-5af42b867a8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.286650 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdgpb\" (UniqueName: \"kubernetes.io/projected/f43007f0-7615-44a1-8594-dd0b0adbded6-kube-api-access-hdgpb\") pod \"octavia-operator-controller-manager-54cfbf4c7d-hn7wz\" (UID: \"f43007f0-7615-44a1-8594-dd0b0adbded6\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.286681 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2ebc4c96-b0e9-4f9f-950b-5af42b867a8a-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm\" (UID: \"2ebc4c96-b0e9-4f9f-950b-5af42b867a8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm"
Nov 24 17:17:28 crc kubenswrapper[4760]: E1124 17:17:28.287487 4760 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 24 17:17:28 crc kubenswrapper[4760]: E1124 17:17:28.287623 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2ebc4c96-b0e9-4f9f-950b-5af42b867a8a-cert podName:2ebc4c96-b0e9-4f9f-950b-5af42b867a8a nodeName:}" failed. No retries permitted until 2025-11-24 17:17:28.787600523 +0000 UTC m=+844.110482073 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2ebc4c96-b0e9-4f9f-950b-5af42b867a8a-cert") pod "openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm" (UID: "2ebc4c96-b0e9-4f9f-950b-5af42b867a8a") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.308480 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.319849 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdgpb\" (UniqueName: \"kubernetes.io/projected/f43007f0-7615-44a1-8594-dd0b0adbded6-kube-api-access-hdgpb\") pod \"octavia-operator-controller-manager-54cfbf4c7d-hn7wz\" (UID: \"f43007f0-7615-44a1-8594-dd0b0adbded6\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.328275 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-mnszq"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.329951 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.333923 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.335371 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.335499 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.335856 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-svhwg"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.338631 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-vgq6g"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.344174 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.351817 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdhr4\" (UniqueName: \"kubernetes.io/projected/2ebc4c96-b0e9-4f9f-950b-5af42b867a8a-kube-api-access-rdhr4\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm\" (UID: \"2ebc4c96-b0e9-4f9f-950b-5af42b867a8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.355779 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.364223 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-mnszq"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.386500 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.394188 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpctg\" (UniqueName: \"kubernetes.io/projected/b075e65d-1bff-4853-9f78-339a20dde0d8-kube-api-access-hpctg\") pod \"placement-operator-controller-manager-5b797b8dff-9cxcs\" (UID: \"b075e65d-1bff-4853-9f78-339a20dde0d8\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.394227 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mvtd\" (UniqueName: \"kubernetes.io/projected/3ead61e1-d87a-44bb-8144-3198f06976c4-kube-api-access-6mvtd\") pod \"ovn-operator-controller-manager-54fc5f65b7-tjtzh\" (UID: \"3ead61e1-d87a-44bb-8144-3198f06976c4\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.394283 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmshk\" (UniqueName: \"kubernetes.io/projected/12583812-acca-4939-9358-17b4bb668450-kube-api-access-kmshk\") pod \"telemetry-operator-controller-manager-54d7678447-gcrcj\" (UID: \"12583812-acca-4939-9358-17b4bb668450\") " pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.394311 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbgsw\" (UniqueName: \"kubernetes.io/projected/43e5759b-21f0-45be-a96b-c0c86229273f-kube-api-access-lbgsw\") pod \"swift-operator-controller-manager-d656998f4-mnszq\" (UID: \"43e5759b-21f0-45be-a96b-c0c86229273f\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.394885 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-qn926"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.400537 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.401475 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.410240 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-brjjw"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.422339 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-qn926"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.432472 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.437614 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.438902 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.444061 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-v86vh"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.456472 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.490498 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.494324 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.495618 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmshk\" (UniqueName: \"kubernetes.io/projected/12583812-acca-4939-9358-17b4bb668450-kube-api-access-kmshk\") pod \"telemetry-operator-controller-manager-54d7678447-gcrcj\" (UID: \"12583812-acca-4939-9358-17b4bb668450\") " pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.495656 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttttv\" (UniqueName: \"kubernetes.io/projected/6a3853ba-f14b-4d13-96c5-7b7a590086ca-kube-api-access-ttttv\") pod \"watcher-operator-controller-manager-8c6448b9f-8jmvq\" (UID: \"6a3853ba-f14b-4d13-96c5-7b7a590086ca\") " pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.495686 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbgsw\" (UniqueName: \"kubernetes.io/projected/43e5759b-21f0-45be-a96b-c0c86229273f-kube-api-access-lbgsw\") pod \"swift-operator-controller-manager-d656998f4-mnszq\" (UID: \"43e5759b-21f0-45be-a96b-c0c86229273f\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.495709 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76fc6\" (UniqueName: \"kubernetes.io/projected/5b5f6f3c-636d-4507-8c3d-51c1ac4693d6-kube-api-access-76fc6\") pod \"test-operator-controller-manager-b4c496f69-qn926\" (UID: \"5b5f6f3c-636d-4507-8c3d-51c1ac4693d6\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.495757 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpctg\" (UniqueName: \"kubernetes.io/projected/b075e65d-1bff-4853-9f78-339a20dde0d8-kube-api-access-hpctg\") pod \"placement-operator-controller-manager-5b797b8dff-9cxcs\" (UID: \"b075e65d-1bff-4853-9f78-339a20dde0d8\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.495779 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mvtd\" (UniqueName: \"kubernetes.io/projected/3ead61e1-d87a-44bb-8144-3198f06976c4-kube-api-access-6mvtd\") pod \"ovn-operator-controller-manager-54fc5f65b7-tjtzh\" (UID: \"3ead61e1-d87a-44bb-8144-3198f06976c4\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.495820 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/11f165ab-07bd-46ce-ad35-5b349c9b16be-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-jxvzz\" (UID: \"11f165ab-07bd-46ce-ad35-5b349c9b16be\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.499822 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.499961 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.500344 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/11f165ab-07bd-46ce-ad35-5b349c9b16be-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-jxvzz\" (UID: \"11f165ab-07bd-46ce-ad35-5b349c9b16be\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.500927 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-49hhv"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.517731 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbgsw\" (UniqueName: \"kubernetes.io/projected/43e5759b-21f0-45be-a96b-c0c86229273f-kube-api-access-lbgsw\") pod \"swift-operator-controller-manager-d656998f4-mnszq\" (UID: \"43e5759b-21f0-45be-a96b-c0c86229273f\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.519732 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mvtd\" (UniqueName: \"kubernetes.io/projected/3ead61e1-d87a-44bb-8144-3198f06976c4-kube-api-access-6mvtd\") pod \"ovn-operator-controller-manager-54fc5f65b7-tjtzh\" (UID: \"3ead61e1-d87a-44bb-8144-3198f06976c4\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.525798 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmshk\" (UniqueName: \"kubernetes.io/projected/12583812-acca-4939-9358-17b4bb668450-kube-api-access-kmshk\") pod \"telemetry-operator-controller-manager-54d7678447-gcrcj\" (UID: \"12583812-acca-4939-9358-17b4bb668450\") " pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.525849 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpctg\" (UniqueName: \"kubernetes.io/projected/b075e65d-1bff-4853-9f78-339a20dde0d8-kube-api-access-hpctg\") pod \"placement-operator-controller-manager-5b797b8dff-9cxcs\" (UID: \"b075e65d-1bff-4853-9f78-339a20dde0d8\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.534163 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.537450 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.540648 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.542246 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.544043 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.546552 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-gwx6w"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.599448 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttttv\" (UniqueName: \"kubernetes.io/projected/6a3853ba-f14b-4d13-96c5-7b7a590086ca-kube-api-access-ttttv\") pod \"watcher-operator-controller-manager-8c6448b9f-8jmvq\" (UID: \"6a3853ba-f14b-4d13-96c5-7b7a590086ca\") " pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.599521 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76fc6\" (UniqueName: \"kubernetes.io/projected/5b5f6f3c-636d-4507-8c3d-51c1ac4693d6-kube-api-access-76fc6\") pod \"test-operator-controller-manager-b4c496f69-qn926\" (UID: \"5b5f6f3c-636d-4507-8c3d-51c1ac4693d6\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.599567 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ca3d8449-fd16-491b-bd2e-06dcd9103bdf-cert\") pod \"openstack-operator-controller-manager-d5d9ddcff-zjhwp\" (UID: \"ca3d8449-fd16-491b-bd2e-06dcd9103bdf\") " pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.599600 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwg2q\" (UniqueName: \"kubernetes.io/projected/ca3d8449-fd16-491b-bd2e-06dcd9103bdf-kube-api-access-pwg2q\") pod \"openstack-operator-controller-manager-d5d9ddcff-zjhwp\" (UID: \"ca3d8449-fd16-491b-bd2e-06dcd9103bdf\") " pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.599643 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dntk6\" (UniqueName: \"kubernetes.io/projected/df2ff3b3-46c0-4a51-bac9-e19df21c24fa-kube-api-access-dntk6\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-b26r2\" (UID: \"df2ff3b3-46c0-4a51-bac9-e19df21c24fa\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.618661 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76fc6\" (UniqueName: \"kubernetes.io/projected/5b5f6f3c-636d-4507-8c3d-51c1ac4693d6-kube-api-access-76fc6\") pod \"test-operator-controller-manager-b4c496f69-qn926\" (UID: \"5b5f6f3c-636d-4507-8c3d-51c1ac4693d6\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.623084 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttttv\" (UniqueName: \"kubernetes.io/projected/6a3853ba-f14b-4d13-96c5-7b7a590086ca-kube-api-access-ttttv\") pod \"watcher-operator-controller-manager-8c6448b9f-8jmvq\" (UID: \"6a3853ba-f14b-4d13-96c5-7b7a590086ca\") " pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.661290 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.673707 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.695010 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.701264 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ca3d8449-fd16-491b-bd2e-06dcd9103bdf-cert\") pod \"openstack-operator-controller-manager-d5d9ddcff-zjhwp\" (UID: \"ca3d8449-fd16-491b-bd2e-06dcd9103bdf\") " pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.701316 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwg2q\" (UniqueName: \"kubernetes.io/projected/ca3d8449-fd16-491b-bd2e-06dcd9103bdf-kube-api-access-pwg2q\") pod \"openstack-operator-controller-manager-d5d9ddcff-zjhwp\" (UID: \"ca3d8449-fd16-491b-bd2e-06dcd9103bdf\") " pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.701363 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dntk6\" (UniqueName: \"kubernetes.io/projected/df2ff3b3-46c0-4a51-bac9-e19df21c24fa-kube-api-access-dntk6\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-b26r2\" (UID: \"df2ff3b3-46c0-4a51-bac9-e19df21c24fa\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2"
Nov 24 17:17:28 crc kubenswrapper[4760]: E1124 17:17:28.701422 4760 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 24 17:17:28 crc kubenswrapper[4760]: E1124 17:17:28.701508 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca3d8449-fd16-491b-bd2e-06dcd9103bdf-cert podName:ca3d8449-fd16-491b-bd2e-06dcd9103bdf nodeName:}" failed. No retries permitted until 2025-11-24 17:17:29.201484685 +0000 UTC m=+844.524366235 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ca3d8449-fd16-491b-bd2e-06dcd9103bdf-cert") pod "openstack-operator-controller-manager-d5d9ddcff-zjhwp" (UID: "ca3d8449-fd16-491b-bd2e-06dcd9103bdf") : secret "webhook-server-cert" not found
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.731532 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwg2q\" (UniqueName: \"kubernetes.io/projected/ca3d8449-fd16-491b-bd2e-06dcd9103bdf-kube-api-access-pwg2q\") pod \"openstack-operator-controller-manager-d5d9ddcff-zjhwp\" (UID: \"ca3d8449-fd16-491b-bd2e-06dcd9103bdf\") " pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.746214 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.754156 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dntk6\" (UniqueName: \"kubernetes.io/projected/df2ff3b3-46c0-4a51-bac9-e19df21c24fa-kube-api-access-dntk6\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-b26r2\" (UID: \"df2ff3b3-46c0-4a51-bac9-e19df21c24fa\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.765760 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.776682 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.788110 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.803127 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2ebc4c96-b0e9-4f9f-950b-5af42b867a8a-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm\" (UID: \"2ebc4c96-b0e9-4f9f-950b-5af42b867a8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.805418 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.825161 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2ebc4c96-b0e9-4f9f-950b-5af42b867a8a-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm\" (UID: \"2ebc4c96-b0e9-4f9f-950b-5af42b867a8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.862330 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.865116 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.870904 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr"]
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.879611 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2"
Nov 24 17:17:28 crc kubenswrapper[4760]: I1124 17:17:28.992367 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-znhd6"]
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.113520 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-88vbt"]
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.116185 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-88vbt" podUID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerName="registry-server" containerID="cri-o://75763cba45bf8a2028a8bb1a18bd04faa5b663fc64c104789d41ae8768f7f345" gracePeriod=2
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.163608 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-87pfs"]
Nov 24 17:17:29 crc kubenswrapper[4760]: W1124 17:17:29.179284 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7eea786_ecee_41f0_9a52_7ac9bef2f874.slice/crio-60c062cbbd7fd2b31cbd29a129da4003d61c2befc30282193aacd3b5e84997ac WatchSource:0}: Error finding container 60c062cbbd7fd2b31cbd29a129da4003d61c2befc30282193aacd3b5e84997ac: Status 404 returned error can't find the container with id 60c062cbbd7fd2b31cbd29a129da4003d61c2befc30282193aacd3b5e84997ac
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.217966 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ca3d8449-fd16-491b-bd2e-06dcd9103bdf-cert\") pod \"openstack-operator-controller-manager-d5d9ddcff-zjhwp\" (UID: \"ca3d8449-fd16-491b-bd2e-06dcd9103bdf\") " pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.222187 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ca3d8449-fd16-491b-bd2e-06dcd9103bdf-cert\") pod \"openstack-operator-controller-manager-d5d9ddcff-zjhwp\" (UID: \"ca3d8449-fd16-491b-bd2e-06dcd9103bdf\") " pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.380379 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l"]
Nov 24 17:17:29 crc kubenswrapper[4760]: W1124 17:17:29.387845 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda29a1da0_a007_4d2d_8ca2_0a3f78e4d995.slice/crio-bbe3d30ef7ab91a360059e2481ef65de5e70a8733cbdce1d9514fc00237be108 WatchSource:0}: Error finding container bbe3d30ef7ab91a360059e2481ef65de5e70a8733cbdce1d9514fc00237be108: Status 404 returned error can't find the container with id bbe3d30ef7ab91a360059e2481ef65de5e70a8733cbdce1d9514fc00237be108
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.403927 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7"]
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.408036 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m"]
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.413204 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b"]
Nov 24 17:17:29 crc kubenswrapper[4760]: W1124 17:17:29.413310 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93232e72_070f_4a46_89da_983cd8abe0b5.slice/crio-34dee94c74e071359fbe3efc89f5c3e63083a1df62164da92ece199eca9590ae WatchSource:0}: Error finding container 34dee94c74e071359fbe3efc89f5c3e63083a1df62164da92ece199eca9590ae: Status 404 returned error can't find the container with id 34dee94c74e071359fbe3efc89f5c3e63083a1df62164da92ece199eca9590ae
Nov 24 17:17:29 crc kubenswrapper[4760]: W1124 17:17:29.416560 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod981e3771_3dd1_4e3d_9601_7c16bbc22c8f.slice/crio-8ff3d7f28a2bf92110fc286cf32761f51b809b8685239c7ae446c8f7584eb6ad WatchSource:0}: Error finding container 8ff3d7f28a2bf92110fc286cf32761f51b809b8685239c7ae446c8f7584eb6ad: Status 404 returned error can't find the container with id 8ff3d7f28a2bf92110fc286cf32761f51b809b8685239c7ae446c8f7584eb6ad
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.416975 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79"]
Nov 24 17:17:29 crc kubenswrapper[4760]: W1124 17:17:29.419605 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc1dfda1_793b_4b06_a228_0e5472915f76.slice/crio-87ff78288e00b84adf368e0a0376a0252b18b1b53a18599bef227f39d3a541ec WatchSource:0}: Error finding container 87ff78288e00b84adf368e0a0376a0252b18b1b53a18599bef227f39d3a541ec: Status 404 returned error can't find the container with id 87ff78288e00b84adf368e0a0376a0252b18b1b53a18599bef227f39d3a541ec
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.422270 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z"]
Nov 24 17:17:29 crc kubenswrapper[4760]: W1124 17:17:29.425437 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0da29f6_094e_499d_90ea_93ddfe52e165.slice/crio-4b904669890bb440bc61057c055779bb92931b3a9f03737165b6acd5343aa1c4 WatchSource:0}: Error finding container 4b904669890bb440bc61057c055779bb92931b3a9f03737165b6acd5343aa1c4: Status 404 returned error can't find the container with id 4b904669890bb440bc61057c055779bb92931b3a9f03737165b6acd5343aa1c4
Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.441148 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp" Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.494807 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" event={"ID":"e3c878c9-0549-4e8b-bb1a-2754b8a8d402","Type":"ContainerStarted","Data":"dd46c9c3d517b0595a5f7d9cc58b8aaade0e563a6cf1ebf9a503b798d809024e"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.495497 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l" event={"ID":"a29a1da0-a007-4d2d-8ca2-0a3f78e4d995","Type":"ContainerStarted","Data":"bbe3d30ef7ab91a360059e2481ef65de5e70a8733cbdce1d9514fc00237be108"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.499796 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" event={"ID":"981e3771-3dd1-4e3d-9601-7c16bbc22c8f","Type":"ContainerStarted","Data":"8ff3d7f28a2bf92110fc286cf32761f51b809b8685239c7ae446c8f7584eb6ad"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.502937 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" event={"ID":"c07ab946-dbd4-4fbf-b17c-7bfa133e1c96","Type":"ContainerStarted","Data":"14605124723b08d3b835234b451a36c8456291beebb9d3ff7156f870701c5354"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.508393 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" event={"ID":"df86f3d1-75ea-4757-8115-1440d92160b6","Type":"ContainerStarted","Data":"eb0dc88047a38a1a7c9ef454f3330d040db41f0265174a1d39e19a6892230f08"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.511656 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z" event={"ID":"dc1dfda1-793b-4b06-a228-0e5472915f76","Type":"ContainerStarted","Data":"87ff78288e00b84adf368e0a0376a0252b18b1b53a18599bef227f39d3a541ec"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.512977 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" event={"ID":"abd30b3d-1e1d-4a1d-b4b6-aaf500949015","Type":"ContainerStarted","Data":"a025bcf0e5ee4ff17021324af5349b9b288f01e43d6240e09f618d9dd9cb2637"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.513863 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" event={"ID":"93232e72-070f-4a46-89da-983cd8abe0b5","Type":"ContainerStarted","Data":"34dee94c74e071359fbe3efc89f5c3e63083a1df62164da92ece199eca9590ae"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.514727 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs" event={"ID":"d7eea786-ecee-41f0-9a52-7ac9bef2f874","Type":"ContainerStarted","Data":"60c062cbbd7fd2b31cbd29a129da4003d61c2befc30282193aacd3b5e84997ac"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.515436 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" 
event={"ID":"8a48d8a2-3c00-4a6e-b88f-dab093355874","Type":"ContainerStarted","Data":"d24e6c97e242e3d1a5fb339bbff7a612a540cf8dafc5d81a9c8e6e6d8d19cbeb"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.516154 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79" event={"ID":"c0da29f6-094e-499d-90ea-93ddfe52e165","Type":"ContainerStarted","Data":"4b904669890bb440bc61057c055779bb92931b3a9f03737165b6acd5343aa1c4"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.518462 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-88vbt" event={"ID":"d7fb680d-a3bc-4cba-8231-d31eeef8e418","Type":"ContainerDied","Data":"75763cba45bf8a2028a8bb1a18bd04faa5b663fc64c104789d41ae8768f7f345"} Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.518407 4760 generic.go:334] "Generic (PLEG): container finished" podID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerID="75763cba45bf8a2028a8bb1a18bd04faa5b663fc64c104789d41ae8768f7f345" exitCode=0 Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.575154 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj"] Nov 24 17:17:29 crc kubenswrapper[4760]: W1124 17:17:29.584402 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12583812_acca_4939_9358_17b4bb668450.slice/crio-ad630955444f3ff4c260410c80a052b51b3ac66cb988f083949e6dbd94686210 WatchSource:0}: Error finding container ad630955444f3ff4c260410c80a052b51b3ac66cb988f083949e6dbd94686210: Status 404 returned error can't find the container with id ad630955444f3ff4c260410c80a052b51b3ac66cb988f083949e6dbd94686210 Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.591734 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv"] Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.611140 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz"] Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.619436 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs"] Nov 24 17:17:29 crc kubenswrapper[4760]: W1124 17:17:29.624117 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb075e65d_1bff_4853_9f78_339a20dde0d8.slice/crio-2c6d6c9325af13a38d306bde704f187c7a1a0cac662475bb8db83eac50ed8c6d WatchSource:0}: Error finding container 2c6d6c9325af13a38d306bde704f187c7a1a0cac662475bb8db83eac50ed8c6d: Status 404 returned error can't find the container with id 2c6d6c9325af13a38d306bde704f187c7a1a0cac662475bb8db83eac50ed8c6d Nov 24 17:17:29 crc kubenswrapper[4760]: W1124 17:17:29.625491 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf43007f0_7615_44a1_8594_dd0b0adbded6.slice/crio-f3083a0ce66dc99c26b8abf3da30668ba65beeb459357a8c411bdc09eed2217f WatchSource:0}: Error finding container f3083a0ce66dc99c26b8abf3da30668ba65beeb459357a8c411bdc09eed2217f: Status 404 returned error can't find the container with id f3083a0ce66dc99c26b8abf3da30668ba65beeb459357a8c411bdc09eed2217f Nov 24 17:17:29 crc kubenswrapper[4760]: E1124 17:17:29.627189 
4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hdgpb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-54cfbf4c7d-hn7wz_openstack-operators(f43007f0-7615-44a1-8594-dd0b0adbded6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.791666 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh"] Nov 24 17:17:29 crc kubenswrapper[4760]: E1124 17:17:29.804519 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6mvtd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-54fc5f65b7-tjtzh_openstack-operators(3ead61e1-d87a-44bb-8144-3198f06976c4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.936949 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-mnszq"] Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.977361 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq"] Nov 24 17:17:29 crc kubenswrapper[4760]: I1124 17:17:29.982181 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2"] Nov 24 17:17:29 crc kubenswrapper[4760]: E1124 17:17:29.986462 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ttttv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-8c6448b9f-8jmvq_openstack-operators(6a3853ba-f14b-4d13-96c5-7b7a590086ca): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 17:17:29 crc kubenswrapper[4760]: W1124 17:17:29.996295 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf2ff3b3_46c0_4a51_bac9_e19df21c24fa.slice/crio-a24a55eeda3587ba23d0ebd494ad8f4d16641b8f2716b5746e7442e49ac3ce73 WatchSource:0}: Error finding container a24a55eeda3587ba23d0ebd494ad8f4d16641b8f2716b5746e7442e49ac3ce73: Status 404 returned error can't find the container with id a24a55eeda3587ba23d0ebd494ad8f4d16641b8f2716b5746e7442e49ac3ce73 Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.000487 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm"] Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.006061 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dntk6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-b26r2_openstack-operators(df2ff3b3-46c0-4a51-bac9-e19df21c24fa): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.007141 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2" podUID="df2ff3b3-46c0-4a51-bac9-e19df21c24fa" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.007220 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz"] Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.037367 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-qn926"] Nov 24 17:17:30 crc kubenswrapper[4760]: W1124 17:17:30.052087 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca3d8449_fd16_491b_bd2e_06dcd9103bdf.slice/crio-2e1362af77cc9b8e2be5bbddf14c5fa6398c46b889ff1179506e39b4b865aa55 WatchSource:0}: Error finding container 2e1362af77cc9b8e2be5bbddf14c5fa6398c46b889ff1179506e39b4b865aa55: Status 404 returned error can't find the container with id 2e1362af77cc9b8e2be5bbddf14c5fa6398c46b889ff1179506e39b4b865aa55 Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.051701 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp"] Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.053819 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-58kp5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-6dd8864d7c-jxvzz_openstack-operators(11f165ab-07bd-46ce-ad35-5b349c9b16be): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 17:17:30 crc kubenswrapper[4760]: W1124 17:17:30.058667 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b5f6f3c_636d_4507_8c3d_51c1ac4693d6.slice/crio-333913b42e9b0ed6ff068e5abc2a6ee5ecb6f5daed7b7268ad112b2d1f1380cd WatchSource:0}: Error finding container 333913b42e9b0ed6ff068e5abc2a6ee5ecb6f5daed7b7268ad112b2d1f1380cd: Status 404 returned error can't find the container with id 333913b42e9b0ed6ff068e5abc2a6ee5ecb6f5daed7b7268ad112b2d1f1380cd Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.066922 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-76fc6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-b4c496f69-qn926_openstack-operators(5b5f6f3c-636d-4507-8c3d-51c1ac4693d6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.067936 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz" podUID="f43007f0-7615-44a1-8594-dd0b0adbded6" Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.087704 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh" podUID="3ead61e1-d87a-44bb-8144-3198f06976c4" Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.188688 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq" podUID="6a3853ba-f14b-4d13-96c5-7b7a590086ca" Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.278436 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" podUID="11f165ab-07bd-46ce-ad35-5b349c9b16be" Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.292983 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926" podUID="5b5f6f3c-636d-4507-8c3d-51c1ac4693d6" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.312191 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.441340 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-catalog-content\") pod \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.441420 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9d2j\" (UniqueName: \"kubernetes.io/projected/d7fb680d-a3bc-4cba-8231-d31eeef8e418-kube-api-access-t9d2j\") pod \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.441531 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-utilities\") pod \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\" (UID: \"d7fb680d-a3bc-4cba-8231-d31eeef8e418\") " Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.442449 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-utilities" (OuterVolumeSpecName: "utilities") pod "d7fb680d-a3bc-4cba-8231-d31eeef8e418" (UID: "d7fb680d-a3bc-4cba-8231-d31eeef8e418"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.460291 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7fb680d-a3bc-4cba-8231-d31eeef8e418-kube-api-access-t9d2j" (OuterVolumeSpecName: "kube-api-access-t9d2j") pod "d7fb680d-a3bc-4cba-8231-d31eeef8e418" (UID: "d7fb680d-a3bc-4cba-8231-d31eeef8e418"). InnerVolumeSpecName "kube-api-access-t9d2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.546824 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.546854 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9d2j\" (UniqueName: \"kubernetes.io/projected/d7fb680d-a3bc-4cba-8231-d31eeef8e418-kube-api-access-t9d2j\") on node \"crc\" DevicePath \"\"" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.557897 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d7fb680d-a3bc-4cba-8231-d31eeef8e418" (UID: "d7fb680d-a3bc-4cba-8231-d31eeef8e418"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.584327 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq" event={"ID":"6a3853ba-f14b-4d13-96c5-7b7a590086ca","Type":"ContainerStarted","Data":"d8bb9ec64813ccc11a3e58af608bbc894d13e591b54137720b88a8c94bc38af5"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.584393 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq" event={"ID":"6a3853ba-f14b-4d13-96c5-7b7a590086ca","Type":"ContainerStarted","Data":"714f04cd04a06153b0c292cf84228fbfd41f25fa658c4cd3d9f63f4cc714a581"} Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.592036 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq" podUID="6a3853ba-f14b-4d13-96c5-7b7a590086ca" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.597869 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs" event={"ID":"b075e65d-1bff-4853-9f78-339a20dde0d8","Type":"ContainerStarted","Data":"2c6d6c9325af13a38d306bde704f187c7a1a0cac662475bb8db83eac50ed8c6d"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.602887 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2" event={"ID":"df2ff3b3-46c0-4a51-bac9-e19df21c24fa","Type":"ContainerStarted","Data":"a24a55eeda3587ba23d0ebd494ad8f4d16641b8f2716b5746e7442e49ac3ce73"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.604146 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj" event={"ID":"12583812-acca-4939-9358-17b4bb668450","Type":"ContainerStarted","Data":"ad630955444f3ff4c260410c80a052b51b3ac66cb988f083949e6dbd94686210"} Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.605160 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2" podUID="df2ff3b3-46c0-4a51-bac9-e19df21c24fa" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.606409 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm" event={"ID":"2ebc4c96-b0e9-4f9f-950b-5af42b867a8a","Type":"ContainerStarted","Data":"5584184a7b3f3aad879333ec842195da3a84c581bb98e18a522abe6d9d69a2e2"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.608170 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp" event={"ID":"ca3d8449-fd16-491b-bd2e-06dcd9103bdf","Type":"ContainerStarted","Data":"aa73459bd31fd66149cdc4bdbb5a4ea8d0010bac6e3e1df624a8c9f59e968fc5"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.608189 4760 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp" event={"ID":"ca3d8449-fd16-491b-bd2e-06dcd9103bdf","Type":"ContainerStarted","Data":"2e1362af77cc9b8e2be5bbddf14c5fa6398c46b889ff1179506e39b4b865aa55"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.609124 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh" event={"ID":"3ead61e1-d87a-44bb-8144-3198f06976c4","Type":"ContainerStarted","Data":"1db031c02a4cfcd8e974f9fb77006eb0d016ce12556bdcf308c454e9839c4172"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.609141 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh" event={"ID":"3ead61e1-d87a-44bb-8144-3198f06976c4","Type":"ContainerStarted","Data":"c916bfd01357556ec7e22767a7074aae7233506caa8f7cc4835534930a7c2ce3"} Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.612694 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh" podUID="3ead61e1-d87a-44bb-8144-3198f06976c4" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.614446 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq" event={"ID":"43e5759b-21f0-45be-a96b-c0c86229273f","Type":"ContainerStarted","Data":"176d31227f2e507b3d217f9dc26fd3324946a88b19f9fb2a52e39a2252a9a8c4"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.620028 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" event={"ID":"11f165ab-07bd-46ce-ad35-5b349c9b16be","Type":"ContainerStarted","Data":"b93bbb718deeba91aecefc2489f6842435039e9cd93748db0bdef36b5f9f9b45"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.620060 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" event={"ID":"11f165ab-07bd-46ce-ad35-5b349c9b16be","Type":"ContainerStarted","Data":"4719e2873bd89ad6916b2ac58924f237a4c9c268dfd85fd031cb9d9048e0fa60"} Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.621051 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" podUID="11f165ab-07bd-46ce-ad35-5b349c9b16be" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.621128 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv" event={"ID":"51bd5ae4-002b-40c4-bd9e-b6d087bfdaba","Type":"ContainerStarted","Data":"2fde756d47c2ba712a3201a6369708487c169427f7f7f98706cebc729cf22528"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.622730 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926" 
event={"ID":"5b5f6f3c-636d-4507-8c3d-51c1ac4693d6","Type":"ContainerStarted","Data":"88a4f8c94c9e6e8c36ed505332d8432d59a3bc676fb6e8ebd7015126be816715"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.622753 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926" event={"ID":"5b5f6f3c-636d-4507-8c3d-51c1ac4693d6","Type":"ContainerStarted","Data":"333913b42e9b0ed6ff068e5abc2a6ee5ecb6f5daed7b7268ad112b2d1f1380cd"} Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.623826 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926" podUID="5b5f6f3c-636d-4507-8c3d-51c1ac4693d6" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.625949 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-88vbt" event={"ID":"d7fb680d-a3bc-4cba-8231-d31eeef8e418","Type":"ContainerDied","Data":"b8dc44359a3e9e10f85b88accf0c2e6b1d094ff3d3fa1c02c705472db7072afe"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.625982 4760 scope.go:117] "RemoveContainer" containerID="75763cba45bf8a2028a8bb1a18bd04faa5b663fc64c104789d41ae8768f7f345" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.626086 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-88vbt" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.649091 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7fb680d-a3bc-4cba-8231-d31eeef8e418-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.680852 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz" event={"ID":"f43007f0-7615-44a1-8594-dd0b0adbded6","Type":"ContainerStarted","Data":"1118c4e54b66af52e2e074858fe18d85d0f1b0d38c27ebb7bc78a246c4fc1772"} Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.680894 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz" event={"ID":"f43007f0-7615-44a1-8594-dd0b0adbded6","Type":"ContainerStarted","Data":"f3083a0ce66dc99c26b8abf3da30668ba65beeb459357a8c411bdc09eed2217f"} Nov 24 17:17:30 crc kubenswrapper[4760]: E1124 17:17:30.683377 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz" podUID="f43007f0-7615-44a1-8594-dd0b0adbded6" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.698240 4760 scope.go:117] "RemoveContainer" containerID="028d740eed8c82f9280251d3a0879a0e71e53ae0068cda3e5900ebc2fdf186f1" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.745853 4760 scope.go:117] "RemoveContainer" containerID="762b521261dc91617f4c13370a33345843eee40c5f215507572cb54447e87bed" Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.766105 4760 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-88vbt"] Nov 24 17:17:30 crc kubenswrapper[4760]: I1124 17:17:30.787724 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-88vbt"] Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.479538 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" path="/var/lib/kubelet/pods/d7fb680d-a3bc-4cba-8231-d31eeef8e418/volumes" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.668777 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jc8k8"] Nov 24 17:17:31 crc kubenswrapper[4760]: E1124 17:17:31.669083 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerName="extract-utilities" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.669096 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerName="extract-utilities" Nov 24 17:17:31 crc kubenswrapper[4760]: E1124 17:17:31.669111 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerName="extract-content" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.669118 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerName="extract-content" Nov 24 17:17:31 crc kubenswrapper[4760]: E1124 17:17:31.669134 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerName="registry-server" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.669140 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerName="registry-server" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.669266 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7fb680d-a3bc-4cba-8231-d31eeef8e418" containerName="registry-server" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.670474 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jc8k8"
Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.678021 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jc8k8"]
Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.709499 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp" event={"ID":"ca3d8449-fd16-491b-bd2e-06dcd9103bdf","Type":"ContainerStarted","Data":"353563ac5ccbc4f5db4bd54f818d5f678ea2de4d766ce6b1958b2a9d695efb26"}
Nov 24 17:17:31 crc kubenswrapper[4760]: E1124 17:17:31.711744 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh" podUID="3ead61e1-d87a-44bb-8144-3198f06976c4"
Nov 24 17:17:31 crc kubenswrapper[4760]: E1124 17:17:31.712853 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz" podUID="f43007f0-7615-44a1-8594-dd0b0adbded6"
Nov 24 17:17:31 crc kubenswrapper[4760]: E1124 17:17:31.712909 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926" podUID="5b5f6f3c-636d-4507-8c3d-51c1ac4693d6"
Nov 24 17:17:31 crc kubenswrapper[4760]: E1124 17:17:31.712935 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2" podUID="df2ff3b3-46c0-4a51-bac9-e19df21c24fa"
Nov 24 17:17:31 crc kubenswrapper[4760]: E1124 17:17:31.713198 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq" podUID="6a3853ba-f14b-4d13-96c5-7b7a590086ca"
Nov 24 17:17:31 crc kubenswrapper[4760]: E1124 17:17:31.714661 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" podUID="11f165ab-07bd-46ce-ad35-5b349c9b16be"
Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.787586 4760 reconciler_common.go:245]
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-utilities\") pod \"redhat-marketplace-jc8k8\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") " pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.787643 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55hwc\" (UniqueName: \"kubernetes.io/projected/28576a4e-8691-44e1-b1da-891e95a3fa9e-kube-api-access-55hwc\") pod \"redhat-marketplace-jc8k8\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") " pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.788135 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-catalog-content\") pod \"redhat-marketplace-jc8k8\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") " pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.834751 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp" podStartSLOduration=3.834734441 podStartE2EDuration="3.834734441s" podCreationTimestamp="2025-11-24 17:17:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:17:31.833368574 +0000 UTC m=+847.156250124" watchObservedRunningTime="2025-11-24 17:17:31.834734441 +0000 UTC m=+847.157615991" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.889676 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-catalog-content\") pod \"redhat-marketplace-jc8k8\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") " pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.889722 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-utilities\") pod \"redhat-marketplace-jc8k8\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") " pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.889750 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55hwc\" (UniqueName: \"kubernetes.io/projected/28576a4e-8691-44e1-b1da-891e95a3fa9e-kube-api-access-55hwc\") pod \"redhat-marketplace-jc8k8\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") " pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.890478 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-catalog-content\") pod \"redhat-marketplace-jc8k8\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") " pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.890685 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-utilities\") pod \"redhat-marketplace-jc8k8\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") " pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.910470 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55hwc\" (UniqueName: \"kubernetes.io/projected/28576a4e-8691-44e1-b1da-891e95a3fa9e-kube-api-access-55hwc\") pod \"redhat-marketplace-jc8k8\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") " pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:31 crc kubenswrapper[4760]: I1124 17:17:31.995272 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:32 crc kubenswrapper[4760]: I1124 17:17:32.291563 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jc8k8"] Nov 24 17:17:32 crc kubenswrapper[4760]: I1124 17:17:32.720419 4760 generic.go:334] "Generic (PLEG): container finished" podID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerID="d313be90a319ba8e89e95f1004fa9948ac253c81d46b6744404b098379a5d3b5" exitCode=0 Nov 24 17:17:32 crc kubenswrapper[4760]: I1124 17:17:32.720571 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jc8k8" event={"ID":"28576a4e-8691-44e1-b1da-891e95a3fa9e","Type":"ContainerDied","Data":"d313be90a319ba8e89e95f1004fa9948ac253c81d46b6744404b098379a5d3b5"} Nov 24 17:17:32 crc kubenswrapper[4760]: I1124 17:17:32.721258 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jc8k8" event={"ID":"28576a4e-8691-44e1-b1da-891e95a3fa9e","Type":"ContainerStarted","Data":"5bd8125fb409a782a92da1db2f499afa661e1f893bde450264c1171dd5960789"} Nov 24 17:17:32 crc kubenswrapper[4760]: I1124 17:17:32.721274 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp" Nov 24 17:17:39 crc kubenswrapper[4760]: I1124 17:17:39.447613 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-d5d9ddcff-zjhwp" Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.793729 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79" event={"ID":"c0da29f6-094e-499d-90ea-93ddfe52e165","Type":"ContainerStarted","Data":"b26f1a3d262e9d0a750d28c4c72c6033380ae5e8e29c1edf57f0bbcdef242320"} Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.796989 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs" event={"ID":"d7eea786-ecee-41f0-9a52-7ac9bef2f874","Type":"ContainerStarted","Data":"b273ed6bc4347a4cddd74fed70d36a42582d2939a823ebd528eca20e675fa093"} Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.798549 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" event={"ID":"c07ab946-dbd4-4fbf-b17c-7bfa133e1c96","Type":"ContainerStarted","Data":"7b2b9d9e4c90fa11fa7048a99b162477c11e6a4fff0063acc6e541ba2e22dc08"} Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.799574 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm" event={"ID":"2ebc4c96-b0e9-4f9f-950b-5af42b867a8a","Type":"ContainerStarted","Data":"f50195d128282fa5bd6988ead6b2919e26765222fc5e052436cdef6e3cf37ba3"} Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.801166 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z" event={"ID":"dc1dfda1-793b-4b06-a228-0e5472915f76","Type":"ContainerStarted","Data":"3f936134ecb87c4e85a6a3a858ed0b36485c129bc96c9ad971177faecdebed14"} Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.805402 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l" event={"ID":"a29a1da0-a007-4d2d-8ca2-0a3f78e4d995","Type":"ContainerStarted","Data":"2c3f2c46d70cc900407576fc1c968e7347f17c9777a8a6465f84e1359b10e948"} Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.806818 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" event={"ID":"981e3771-3dd1-4e3d-9601-7c16bbc22c8f","Type":"ContainerStarted","Data":"cafbeb064b27457f446369f9f3675c6ed2d1a15602412b9a27309f9efa36627c"} Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.808998 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq" event={"ID":"43e5759b-21f0-45be-a96b-c0c86229273f","Type":"ContainerStarted","Data":"a42b38887d8b9b9187db226a7970ec9f4fc3fdc50b59d5229c4972f911b3aa77"} Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.811476 4760 generic.go:334] "Generic (PLEG): container finished" podID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerID="4130cd0202c075c4aa6a2140fff681c8f7870423d5003c977f723a4496a164f6" exitCode=0 Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.811550 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jc8k8" event={"ID":"28576a4e-8691-44e1-b1da-891e95a3fa9e","Type":"ContainerDied","Data":"4130cd0202c075c4aa6a2140fff681c8f7870423d5003c977f723a4496a164f6"} Nov 24 17:17:41 crc kubenswrapper[4760]: I1124 17:17:41.815738 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" event={"ID":"8a48d8a2-3c00-4a6e-b88f-dab093355874","Type":"ContainerStarted","Data":"9344555a40cfaba270da4a73e821565715d372d5a99d8e96dd9fcb43de41ca4a"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.828233 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" event={"ID":"e3c878c9-0549-4e8b-bb1a-2754b8a8d402","Type":"ContainerStarted","Data":"a5e943a6e36ed8f8ca04f02908d54c654fdc6d03dffe1c574497bace251f0dbe"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.828701 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" event={"ID":"e3c878c9-0549-4e8b-bb1a-2754b8a8d402","Type":"ContainerStarted","Data":"7c68527bcb68081c7dcf8a053f3644ba76158102af88903d1cd6982a9a2d74f0"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.828727 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.830046 4760 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs" event={"ID":"b075e65d-1bff-4853-9f78-339a20dde0d8","Type":"ContainerStarted","Data":"4a54d2ecbe89b5db3e77ad2e854db06bb7c43f27cf5294171d34e31e54e88cc4"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.830089 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs" event={"ID":"b075e65d-1bff-4853-9f78-339a20dde0d8","Type":"ContainerStarted","Data":"f799f18cea46f544e29129789cb0dfd6c3306089e04858cc17076a1e091ab8ed"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.830229 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.831650 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs" event={"ID":"d7eea786-ecee-41f0-9a52-7ac9bef2f874","Type":"ContainerStarted","Data":"e973d454eab35b02e8352fd93067de82ba750de88373f18a1c5e68c3380184b2"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.831770 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.833315 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" event={"ID":"df86f3d1-75ea-4757-8115-1440d92160b6","Type":"ContainerStarted","Data":"7af97ffa95f46ee78f2adc62165446a1a279d13926a7cf5515c7333597f91933"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.833345 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" event={"ID":"df86f3d1-75ea-4757-8115-1440d92160b6","Type":"ContainerStarted","Data":"db5c339df6a8e09e5867eafc0069b68505aef51ace10a0323c029acd668d7615"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.833423 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.835254 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm" event={"ID":"2ebc4c96-b0e9-4f9f-950b-5af42b867a8a","Type":"ContainerStarted","Data":"6a67efe1d7bd21dcbebd81b7bfe7038adca37610232bc8945faf27bb52d4a2c0"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.835703 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.837798 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv" event={"ID":"51bd5ae4-002b-40c4-bd9e-b6d087bfdaba","Type":"ContainerStarted","Data":"d96c6f0e59924ef96027416103334f5ca714b2534c55c083e03a36738c9f1139"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.837835 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv" 
event={"ID":"51bd5ae4-002b-40c4-bd9e-b6d087bfdaba","Type":"ContainerStarted","Data":"74d3ed15eed98836720b048c460ae013a499504af41bd16c1b933104724f2685"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.837923 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.839365 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" event={"ID":"981e3771-3dd1-4e3d-9601-7c16bbc22c8f","Type":"ContainerStarted","Data":"643fdd0545245ca23528155929670e0ce6f0b450e0b7463c47a76dc2c4753c43"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.839446 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.841208 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" event={"ID":"c07ab946-dbd4-4fbf-b17c-7bfa133e1c96","Type":"ContainerStarted","Data":"da4faa8726eab773dce0e0282532566ff0cfe39c18dea924ca96387a135edaf0"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.841819 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.843764 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" event={"ID":"8a48d8a2-3c00-4a6e-b88f-dab093355874","Type":"ContainerStarted","Data":"7680189a1b5bd99b4ae36e47235d1038ce2525b41ade1ae52fa60cade30ffbe4"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.843919 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.845831 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" event={"ID":"abd30b3d-1e1d-4a1d-b4b6-aaf500949015","Type":"ContainerStarted","Data":"480c7df8c02766cb74b99187c5a020ea09182f4f2343e68f09bc568bfed513a3"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.845856 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" event={"ID":"abd30b3d-1e1d-4a1d-b4b6-aaf500949015","Type":"ContainerStarted","Data":"57efca17649a313afae94c9e49dc37ef8e0e9f7fed5cdff4c0089bd7a395494f"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.846471 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.849523 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj" event={"ID":"12583812-acca-4939-9358-17b4bb668450","Type":"ContainerStarted","Data":"997425981eb27b868e76c20a01ec3e74c30ba5dc41cb65e5231d0a2b93742b50"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.849574 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj" 
event={"ID":"12583812-acca-4939-9358-17b4bb668450","Type":"ContainerStarted","Data":"c815411929d42a03347af75ba26e1b557b1113c2dab9d5e7eaa46854f0dabb88"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.850148 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.853042 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" event={"ID":"93232e72-070f-4a46-89da-983cd8abe0b5","Type":"ContainerStarted","Data":"3ed16eb474548f792c7739b5c36142fce132efb09d2c70d392db6562289f0226"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.853069 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" event={"ID":"93232e72-070f-4a46-89da-983cd8abe0b5","Type":"ContainerStarted","Data":"bfa6147dac291643fc07ce2e6850f2280ae0fdc49869c2432cfce22389b00a57"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.853708 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.856208 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq" event={"ID":"43e5759b-21f0-45be-a96b-c0c86229273f","Type":"ContainerStarted","Data":"e5febfcd83d06972cfba70ea699c259c458af3c5421cc9851eded029a5d81d27"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.856898 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.858650 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z" event={"ID":"dc1dfda1-793b-4b06-a228-0e5472915f76","Type":"ContainerStarted","Data":"3fc6d65761a9ff260d8e8564f70f8b24a863e3a1c4bef27874ba5996cd7de152"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.859114 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.862370 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jc8k8" event={"ID":"28576a4e-8691-44e1-b1da-891e95a3fa9e","Type":"ContainerStarted","Data":"9300634ecb940cdfc98ffd124c0ce388463deb5d548ca5762fb05e6e4851706c"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.865390 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l" event={"ID":"a29a1da0-a007-4d2d-8ca2-0a3f78e4d995","Type":"ContainerStarted","Data":"e3588df5ed325539e6f0cd1a2f54b2a1e99cdedd231997d54738da7ecf551644"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.865851 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.873561 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79" 
event={"ID":"c0da29f6-094e-499d-90ea-93ddfe52e165","Type":"ContainerStarted","Data":"8efefb07c2d52fd90eaed632e66a52b2baccd780397f6edb86a166cbe8a567b3"} Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.874227 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.882812 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" podStartSLOduration=3.8868993659999997 podStartE2EDuration="15.882793251s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.0285822 +0000 UTC m=+844.351463750" lastFinishedPulling="2025-11-24 17:17:41.024476085 +0000 UTC m=+856.347357635" observedRunningTime="2025-11-24 17:17:42.860074956 +0000 UTC m=+858.182956506" watchObservedRunningTime="2025-11-24 17:17:42.882793251 +0000 UTC m=+858.205674801" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.909993 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs" podStartSLOduration=4.031250386 podStartE2EDuration="15.909976898s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.181682991 +0000 UTC m=+844.504564541" lastFinishedPulling="2025-11-24 17:17:41.060409493 +0000 UTC m=+856.383291053" observedRunningTime="2025-11-24 17:17:42.904695603 +0000 UTC m=+858.227577153" watchObservedRunningTime="2025-11-24 17:17:42.909976898 +0000 UTC m=+858.232858448" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.910572 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq" podStartSLOduration=4.7914476409999995 podStartE2EDuration="15.910567814s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.94126698 +0000 UTC m=+845.264148550" lastFinishedPulling="2025-11-24 17:17:41.060387173 +0000 UTC m=+856.383268723" observedRunningTime="2025-11-24 17:17:42.890948165 +0000 UTC m=+858.213829715" watchObservedRunningTime="2025-11-24 17:17:42.910567814 +0000 UTC m=+858.233449364" Nov 24 17:17:42 crc kubenswrapper[4760]: I1124 17:17:42.939944 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm" podStartSLOduration=4.944984424 podStartE2EDuration="15.939929472s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:30.031037729 +0000 UTC m=+845.353919279" lastFinishedPulling="2025-11-24 17:17:41.025982777 +0000 UTC m=+856.348864327" observedRunningTime="2025-11-24 17:17:42.936479767 +0000 UTC m=+858.259361317" watchObservedRunningTime="2025-11-24 17:17:42.939929472 +0000 UTC m=+858.262811022" Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.004036 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj" podStartSLOduration=4.479753961 podStartE2EDuration="16.004020885s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.587199763 +0000 UTC m=+844.910081303" lastFinishedPulling="2025-11-24 17:17:41.111466657 +0000 UTC m=+856.434348227" observedRunningTime="2025-11-24 
17:17:42.976485117 +0000 UTC m=+858.299366667" watchObservedRunningTime="2025-11-24 17:17:43.004020885 +0000 UTC m=+858.326902425"
Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.022272 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z" podStartSLOduration=4.437376755 podStartE2EDuration="16.022253276s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.421883717 +0000 UTC m=+844.744765257" lastFinishedPulling="2025-11-24 17:17:41.006760218 +0000 UTC m=+856.329641778" observedRunningTime="2025-11-24 17:17:43.002233805 +0000 UTC m=+858.325115355" watchObservedRunningTime="2025-11-24 17:17:43.022253276 +0000 UTC m=+858.345134826"
Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.022656 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs" podStartSLOduration=4.556125761 podStartE2EDuration="16.022650827s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.626466993 +0000 UTC m=+844.949348543" lastFinishedPulling="2025-11-24 17:17:41.092992049 +0000 UTC m=+856.415873609" observedRunningTime="2025-11-24 17:17:43.017635169 +0000 UTC m=+858.340516729" watchObservedRunningTime="2025-11-24 17:17:43.022650827 +0000 UTC m=+858.345532377"
Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.057652 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" podStartSLOduration=3.992182921 podStartE2EDuration="16.057635319s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:28.939952843 +0000 UTC m=+844.262834393" lastFinishedPulling="2025-11-24 17:17:41.005405241 +0000 UTC m=+856.328286791" observedRunningTime="2025-11-24 17:17:43.054450141 +0000 UTC m=+858.377331691" watchObservedRunningTime="2025-11-24 17:17:43.057635319 +0000 UTC m=+858.380516869"
Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.102676 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv" podStartSLOduration=4.655330848 podStartE2EDuration="16.102657447s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.619034948 +0000 UTC m=+844.941916498" lastFinishedPulling="2025-11-24 17:17:41.066361537 +0000 UTC m=+856.389243097" observedRunningTime="2025-11-24 17:17:43.101193537 +0000 UTC m=+858.424075087" watchObservedRunningTime="2025-11-24 17:17:43.102657447 +0000 UTC m=+858.425538997"
Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.103868 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" podStartSLOduration=4.425812247 podStartE2EDuration="16.10386453s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.416651243 +0000 UTC m=+844.739532793" lastFinishedPulling="2025-11-24 17:17:41.094703526 +0000 UTC m=+856.417585076" observedRunningTime="2025-11-24 17:17:43.082090391 +0000 UTC m=+858.404971941" watchObservedRunningTime="2025-11-24 17:17:43.10386453 +0000 UTC m=+858.426746080"
Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.124629 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration"
pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" podStartSLOduration=4.5046521550000005 podStartE2EDuration="16.124611691s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.409802074 +0000 UTC m=+844.732683624" lastFinishedPulling="2025-11-24 17:17:41.0297616 +0000 UTC m=+856.352643160" observedRunningTime="2025-11-24 17:17:43.122330318 +0000 UTC m=+858.445211868" watchObservedRunningTime="2025-11-24 17:17:43.124611691 +0000 UTC m=+858.447493241" Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.145726 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" podStartSLOduration=4.006360582 podStartE2EDuration="16.145707951s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:28.92565227 +0000 UTC m=+844.248533820" lastFinishedPulling="2025-11-24 17:17:41.064999639 +0000 UTC m=+856.387881189" observedRunningTime="2025-11-24 17:17:43.141972318 +0000 UTC m=+858.464853858" watchObservedRunningTime="2025-11-24 17:17:43.145707951 +0000 UTC m=+858.468589501" Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.165773 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" podStartSLOduration=3.953343893 podStartE2EDuration="16.165757012s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:28.82420113 +0000 UTC m=+844.147082690" lastFinishedPulling="2025-11-24 17:17:41.036614259 +0000 UTC m=+856.359495809" observedRunningTime="2025-11-24 17:17:43.165615038 +0000 UTC m=+858.488496588" watchObservedRunningTime="2025-11-24 17:17:43.165757012 +0000 UTC m=+858.488638562" Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.191523 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" podStartSLOduration=4.579601656 podStartE2EDuration="16.191507031s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.417981729 +0000 UTC m=+844.740863279" lastFinishedPulling="2025-11-24 17:17:41.029887094 +0000 UTC m=+856.352768654" observedRunningTime="2025-11-24 17:17:43.190208085 +0000 UTC m=+858.513089645" watchObservedRunningTime="2025-11-24 17:17:43.191507031 +0000 UTC m=+858.514388581" Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.214026 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79" podStartSLOduration=4.610910707 podStartE2EDuration="16.213988639s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.427684846 +0000 UTC m=+844.750566386" lastFinishedPulling="2025-11-24 17:17:41.030762768 +0000 UTC m=+856.353644318" observedRunningTime="2025-11-24 17:17:43.207774378 +0000 UTC m=+858.530655928" watchObservedRunningTime="2025-11-24 17:17:43.213988639 +0000 UTC m=+858.536870199" Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.230343 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l" podStartSLOduration=4.591542904 podStartE2EDuration="16.230299407s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.391044388 +0000 UTC 
m=+844.713925938" lastFinishedPulling="2025-11-24 17:17:41.029800851 +0000 UTC m=+856.352682441" observedRunningTime="2025-11-24 17:17:43.225593228 +0000 UTC m=+858.548474778" watchObservedRunningTime="2025-11-24 17:17:43.230299407 +0000 UTC m=+858.553180957" Nov 24 17:17:43 crc kubenswrapper[4760]: I1124 17:17:43.255018 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jc8k8" podStartSLOduration=3.044290596 podStartE2EDuration="12.254983466s" podCreationTimestamp="2025-11-24 17:17:31 +0000 UTC" firstStartedPulling="2025-11-24 17:17:33.20773813 +0000 UTC m=+848.530619680" lastFinishedPulling="2025-11-24 17:17:42.41843101 +0000 UTC m=+857.741312550" observedRunningTime="2025-11-24 17:17:43.249378932 +0000 UTC m=+858.572260482" watchObservedRunningTime="2025-11-24 17:17:43.254983466 +0000 UTC m=+858.577865016" Nov 24 17:17:44 crc kubenswrapper[4760]: I1124 17:17:44.894380 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926" event={"ID":"5b5f6f3c-636d-4507-8c3d-51c1ac4693d6","Type":"ContainerStarted","Data":"26bdc41801be790df421d1c134969f8f23588f4f32aa69e3fa1d74501e2657c9"} Nov 24 17:17:44 crc kubenswrapper[4760]: I1124 17:17:44.920564 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926" podStartSLOduration=2.500496519 podStartE2EDuration="16.92054404s" podCreationTimestamp="2025-11-24 17:17:28 +0000 UTC" firstStartedPulling="2025-11-24 17:17:30.066772072 +0000 UTC m=+845.389653622" lastFinishedPulling="2025-11-24 17:17:44.486819593 +0000 UTC m=+859.809701143" observedRunningTime="2025-11-24 17:17:44.913830375 +0000 UTC m=+860.236711925" watchObservedRunningTime="2025-11-24 17:17:44.92054404 +0000 UTC m=+860.243425600" Nov 24 17:17:47 crc kubenswrapper[4760]: I1124 17:17:47.892171 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-f97qw" Nov 24 17:17:47 crc kubenswrapper[4760]: I1124 17:17:47.973697 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-9fscr" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.036990 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-6tfrh" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.110023 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-7969689c84-znhd6" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.111243 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-dm4k7" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.140319 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-95w5b" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.162279 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-zd54m" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.313502 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/keystone-operator-controller-manager-7454b96578-wgd79" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.358950 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58f887965d-87pfs" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.402821 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-j6f4z" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.435595 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-fl68l" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.505527 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-r7dzv" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.699100 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-9cxcs" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.769237 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-54d7678447-gcrcj" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.779715 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d656998f4-mnszq" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.791308 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926" Nov 24 17:17:48 crc kubenswrapper[4760]: I1124 17:17:48.868857 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm" Nov 24 17:17:51 crc kubenswrapper[4760]: I1124 17:17:51.995393 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:51 crc kubenswrapper[4760]: I1124 17:17:51.995784 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:52 crc kubenswrapper[4760]: I1124 17:17:52.042555 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:52 crc kubenswrapper[4760]: I1124 17:17:52.998963 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jc8k8" Nov 24 17:17:55 crc kubenswrapper[4760]: I1124 17:17:55.790442 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jc8k8"] Nov 24 17:17:55 crc kubenswrapper[4760]: I1124 17:17:55.790914 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jc8k8" podUID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerName="registry-server" containerID="cri-o://9300634ecb940cdfc98ffd124c0ce388463deb5d548ca5762fb05e6e4851706c" gracePeriod=2 Nov 24 17:17:56 crc kubenswrapper[4760]: I1124 17:17:56.988034 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq" 
event={"ID":"6a3853ba-f14b-4d13-96c5-7b7a590086ca","Type":"ContainerStarted","Data":"9b6dac0c29f99cc5be538ff183a8f3fdfbc3aca823f70eefbc8cfa3e242ea6b5"} Nov 24 17:17:56 crc kubenswrapper[4760]: I1124 17:17:56.988611 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq" Nov 24 17:17:56 crc kubenswrapper[4760]: I1124 17:17:56.990967 4760 generic.go:334] "Generic (PLEG): container finished" podID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerID="9300634ecb940cdfc98ffd124c0ce388463deb5d548ca5762fb05e6e4851706c" exitCode=0 Nov 24 17:17:56 crc kubenswrapper[4760]: I1124 17:17:56.991045 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jc8k8" event={"ID":"28576a4e-8691-44e1-b1da-891e95a3fa9e","Type":"ContainerDied","Data":"9300634ecb940cdfc98ffd124c0ce388463deb5d548ca5762fb05e6e4851706c"} Nov 24 17:17:56 crc kubenswrapper[4760]: I1124 17:17:56.992919 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2" event={"ID":"df2ff3b3-46c0-4a51-bac9-e19df21c24fa","Type":"ContainerStarted","Data":"1fa128155bb3693b336f56ada7e81f2e1881166ebdb6225e105e5daf332eb93f"} Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.011906 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq" podStartSLOduration=3.6521287300000003 podStartE2EDuration="29.01188609s" podCreationTimestamp="2025-11-24 17:17:28 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.986329649 +0000 UTC m=+845.309211199" lastFinishedPulling="2025-11-24 17:17:55.346086969 +0000 UTC m=+870.668968559" observedRunningTime="2025-11-24 17:17:57.006244964 +0000 UTC m=+872.329126534" watchObservedRunningTime="2025-11-24 17:17:57.01188609 +0000 UTC m=+872.334767650" Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.024270 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-b26r2" podStartSLOduration=2.790248377 podStartE2EDuration="29.024208878s" podCreationTimestamp="2025-11-24 17:17:28 +0000 UTC" firstStartedPulling="2025-11-24 17:17:30.005927358 +0000 UTC m=+845.328808908" lastFinishedPulling="2025-11-24 17:17:56.239887849 +0000 UTC m=+871.562769409" observedRunningTime="2025-11-24 17:17:57.020875877 +0000 UTC m=+872.343757437" watchObservedRunningTime="2025-11-24 17:17:57.024208878 +0000 UTC m=+872.347090438" Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.196460 4760 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.196460 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jc8k8"
Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.306287 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-catalog-content\") pod \"28576a4e-8691-44e1-b1da-891e95a3fa9e\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") "
Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.306388 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55hwc\" (UniqueName: \"kubernetes.io/projected/28576a4e-8691-44e1-b1da-891e95a3fa9e-kube-api-access-55hwc\") pod \"28576a4e-8691-44e1-b1da-891e95a3fa9e\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") "
Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.306424 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-utilities\") pod \"28576a4e-8691-44e1-b1da-891e95a3fa9e\" (UID: \"28576a4e-8691-44e1-b1da-891e95a3fa9e\") "
Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.307671 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-utilities" (OuterVolumeSpecName: "utilities") pod "28576a4e-8691-44e1-b1da-891e95a3fa9e" (UID: "28576a4e-8691-44e1-b1da-891e95a3fa9e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.315283 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28576a4e-8691-44e1-b1da-891e95a3fa9e-kube-api-access-55hwc" (OuterVolumeSpecName: "kube-api-access-55hwc") pod "28576a4e-8691-44e1-b1da-891e95a3fa9e" (UID: "28576a4e-8691-44e1-b1da-891e95a3fa9e"). InnerVolumeSpecName "kube-api-access-55hwc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.327486 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28576a4e-8691-44e1-b1da-891e95a3fa9e" (UID: "28576a4e-8691-44e1-b1da-891e95a3fa9e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.408217 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55hwc\" (UniqueName: \"kubernetes.io/projected/28576a4e-8691-44e1-b1da-891e95a3fa9e-kube-api-access-55hwc\") on node \"crc\" DevicePath \"\""
Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.408244 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 17:17:57 crc kubenswrapper[4760]: I1124 17:17:57.408254 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28576a4e-8691-44e1-b1da-891e95a3fa9e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 17:17:58 crc kubenswrapper[4760]: I1124 17:17:58.005301 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jc8k8" event={"ID":"28576a4e-8691-44e1-b1da-891e95a3fa9e","Type":"ContainerDied","Data":"5bd8125fb409a782a92da1db2f499afa661e1f893bde450264c1171dd5960789"}
Nov 24 17:17:58 crc kubenswrapper[4760]: I1124 17:17:58.005362 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jc8k8"
Nov 24 17:17:58 crc kubenswrapper[4760]: I1124 17:17:58.005376 4760 scope.go:117] "RemoveContainer" containerID="9300634ecb940cdfc98ffd124c0ce388463deb5d548ca5762fb05e6e4851706c"
Nov 24 17:17:58 crc kubenswrapper[4760]: I1124 17:17:58.043946 4760 scope.go:117] "RemoveContainer" containerID="4130cd0202c075c4aa6a2140fff681c8f7870423d5003c977f723a4496a164f6"
Nov 24 17:17:58 crc kubenswrapper[4760]: I1124 17:17:58.053641 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jc8k8"]
Nov 24 17:17:58 crc kubenswrapper[4760]: I1124 17:17:58.071189 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jc8k8"]
Nov 24 17:17:58 crc kubenswrapper[4760]: I1124 17:17:58.096174 4760 scope.go:117] "RemoveContainer" containerID="d313be90a319ba8e89e95f1004fa9948ac253c81d46b6744404b098379a5d3b5"
Nov 24 17:17:58 crc kubenswrapper[4760]: I1124 17:17:58.791528 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-b4c496f69-qn926"
Nov 24 17:17:59 crc kubenswrapper[4760]: I1124 17:17:59.491119 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28576a4e-8691-44e1-b1da-891e95a3fa9e" path="/var/lib/kubelet/pods/28576a4e-8691-44e1-b1da-891e95a3fa9e/volumes"
Nov 24 17:18:00 crc kubenswrapper[4760]: I1124 17:18:00.019776 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz" event={"ID":"f43007f0-7615-44a1-8594-dd0b0adbded6","Type":"ContainerStarted","Data":"6693d39e535d5b11538c74ebc8cbd087064689a359e422c03a3fb62dcd2d80f8"}
Nov 24 17:18:00 crc kubenswrapper[4760]: I1124 17:18:00.020898 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz"
Nov 24 17:18:00 crc kubenswrapper[4760]: I1124 17:18:00.024376 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" event={"ID":"11f165ab-07bd-46ce-ad35-5b349c9b16be","Type":"ContainerStarted","Data":"6f01bd6e78fe172864f1b49410c578cb05bf3075ea19dc181f233b15946b797d"}
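The teardown above proceeds strictly in phases for the deleted pod's three volumes: UnmountVolume started, then UnmountVolume.TearDown succeeded, then "Volume detached", and only afterwards does kubelet_volumes.go report the orphaned pod volumes dir cleaned. A sketch of that ordering follows; the types and function are invented for illustration, not kubelet source:

package main

import "fmt"

type volume struct {
	name     string
	detached bool
}

// teardown walks each volume through unmount -> teardown -> detached, and
// only removes the pod directory once every volume has been detached.
func teardown(vols []volume) {
	for i := range vols {
		fmt.Println("UnmountVolume started:", vols[i].name)
		fmt.Println("UnmountVolume.TearDown succeeded:", vols[i].name)
		vols[i].detached = true
		fmt.Println("Volume detached:", vols[i].name)
	}
	for _, v := range vols {
		if !v.detached {
			return // keep the pod dir until everything is gone
		}
	}
	fmt.Println("Cleaned up orphaned pod volumes dir")
}

func main() {
	teardown([]volume{{name: "catalog-content"}, {name: "kube-api-access-55hwc"}, {name: "utilities"}})
}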
Nov 24 17:18:00 crc kubenswrapper[4760]: I1124 17:18:00.042825 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz" podStartSLOduration=5.970774324 podStartE2EDuration="33.042802232s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.627061949 +0000 UTC m=+844.949943499" lastFinishedPulling="2025-11-24 17:17:56.699089857 +0000 UTC m=+872.021971407" observedRunningTime="2025-11-24 17:18:00.039196013 +0000 UTC m=+875.362077563" watchObservedRunningTime="2025-11-24 17:18:00.042802232 +0000 UTC m=+875.365683782"
Nov 24 17:18:01 crc kubenswrapper[4760]: I1124 17:18:01.033578 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh" event={"ID":"3ead61e1-d87a-44bb-8144-3198f06976c4","Type":"ContainerStarted","Data":"ff4e2bf9d8e937e6d5d80655cd6a86e5d443cb958a6b9c3be03f0a96b929521d"}
Nov 24 17:18:01 crc kubenswrapper[4760]: I1124 17:18:01.034108 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz"
Nov 24 17:18:01 crc kubenswrapper[4760]: I1124 17:18:01.078062 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz" podStartSLOduration=7.463337711 podStartE2EDuration="34.078035152s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:30.053695942 +0000 UTC m=+845.376577492" lastFinishedPulling="2025-11-24 17:17:56.668393353 +0000 UTC m=+871.991274933" observedRunningTime="2025-11-24 17:18:01.06559218 +0000 UTC m=+876.388473780" watchObservedRunningTime="2025-11-24 17:18:01.078035152 +0000 UTC m=+876.400916742"
Nov 24 17:18:01 crc kubenswrapper[4760]: I1124 17:18:01.094270 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh" podStartSLOduration=3.058754122 podStartE2EDuration="34.094244398s" podCreationTimestamp="2025-11-24 17:17:27 +0000 UTC" firstStartedPulling="2025-11-24 17:17:29.804379086 +0000 UTC m=+845.127260646" lastFinishedPulling="2025-11-24 17:18:00.839869372 +0000 UTC m=+876.162750922" observedRunningTime="2025-11-24 17:18:01.084390117 +0000 UTC m=+876.407271707" watchObservedRunningTime="2025-11-24 17:18:01.094244398 +0000 UTC m=+876.417125988"
Nov 24 17:18:08 crc kubenswrapper[4760]: I1124 17:18:08.541569 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-hn7wz"
Nov 24 17:18:08 crc kubenswrapper[4760]: I1124 17:18:08.675184 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh"
Nov 24 17:18:08 crc kubenswrapper[4760]: I1124 17:18:08.676597 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-tjtzh"
Nov 24 17:18:08 crc kubenswrapper[4760]: I1124 17:18:08.758974 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-jxvzz"
Nov 24 17:18:08 crc kubenswrapper[4760]: I1124 17:18:08.808331 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-8jmvq"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.568635 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5wrpr"]
Nov 24 17:18:23 crc kubenswrapper[4760]: E1124 17:18:23.569221 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerName="registry-server"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.569233 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerName="registry-server"
Nov 24 17:18:23 crc kubenswrapper[4760]: E1124 17:18:23.569277 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerName="extract-utilities"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.569283 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerName="extract-utilities"
Nov 24 17:18:23 crc kubenswrapper[4760]: E1124 17:18:23.569307 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerName="extract-content"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.569313 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerName="extract-content"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.569437 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="28576a4e-8691-44e1-b1da-891e95a3fa9e" containerName="registry-server"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.570157 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.574598 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-v6h5t"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.579506 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.579716 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.579945 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.583120 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5wrpr"]
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.626248 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mcw7g"]
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.628219 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
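The cpu_manager/state_mem/memory_manager entries above fire while the new dnsmasq pod is being admitted: the resource managers notice that pod 28576a4e-... (the marketplace pod removed earlier) still has per-container assignments recorded and drop them. A map-based Go sketch of that "remove stale state" pass; the state layout here is an assumption for illustration, not the kubelet's actual structures:

package main

import "fmt"

type key struct{ podUID, container string }

// removeStaleState drops any recorded assignment whose pod no longer exists,
// mirroring the "RemoveStaleState: removing container" / "Deleted CPUSet
// assignment" pairs in the log above.
func removeStaleState(assignments map[key]string, livePods map[string]bool) {
	for k := range assignments {
		if !livePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q of pod %s\n", k.container, k.podUID)
			delete(assignments, k)
		}
	}
}

func main() {
	state := map[key]string{
		{"28576a4e", "registry-server"}:   "0-3",
		{"28576a4e", "extract-utilities"}: "0-3",
		{"28576a4e", "extract-content"}:   "0-3",
	}
	removeStaleState(state, map[string]bool{ /* pod 28576a4e is gone */ })
	fmt.Println("remaining assignments:", len(state))
}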
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.633413 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.641222 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mcw7g"]
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.684704 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28ae214e-91a1-4366-bad6-2afdfd94e760-config\") pod \"dnsmasq-dns-675f4bcbfc-5wrpr\" (UID: \"28ae214e-91a1-4366-bad6-2afdfd94e760\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.685034 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrjhp\" (UniqueName: \"kubernetes.io/projected/28ae214e-91a1-4366-bad6-2afdfd94e760-kube-api-access-lrjhp\") pod \"dnsmasq-dns-675f4bcbfc-5wrpr\" (UID: \"28ae214e-91a1-4366-bad6-2afdfd94e760\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.786697 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28ae214e-91a1-4366-bad6-2afdfd94e760-config\") pod \"dnsmasq-dns-675f4bcbfc-5wrpr\" (UID: \"28ae214e-91a1-4366-bad6-2afdfd94e760\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.786937 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-mcw7g\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.787089 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbpx5\" (UniqueName: \"kubernetes.io/projected/0181220f-f87c-44b7-b544-a38e518fdaf6-kube-api-access-hbpx5\") pod \"dnsmasq-dns-78dd6ddcc-mcw7g\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.787193 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrjhp\" (UniqueName: \"kubernetes.io/projected/28ae214e-91a1-4366-bad6-2afdfd94e760-kube-api-access-lrjhp\") pod \"dnsmasq-dns-675f4bcbfc-5wrpr\" (UID: \"28ae214e-91a1-4366-bad6-2afdfd94e760\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.787297 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-config\") pod \"dnsmasq-dns-78dd6ddcc-mcw7g\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.788343 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28ae214e-91a1-4366-bad6-2afdfd94e760-config\") pod \"dnsmasq-dns-675f4bcbfc-5wrpr\" (UID: \"28ae214e-91a1-4366-bad6-2afdfd94e760\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.805660 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrjhp\" (UniqueName: \"kubernetes.io/projected/28ae214e-91a1-4366-bad6-2afdfd94e760-kube-api-access-lrjhp\") pod \"dnsmasq-dns-675f4bcbfc-5wrpr\" (UID: \"28ae214e-91a1-4366-bad6-2afdfd94e760\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.888871 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbpx5\" (UniqueName: \"kubernetes.io/projected/0181220f-f87c-44b7-b544-a38e518fdaf6-kube-api-access-hbpx5\") pod \"dnsmasq-dns-78dd6ddcc-mcw7g\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.888929 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-config\") pod \"dnsmasq-dns-78dd6ddcc-mcw7g\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.889014 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-mcw7g\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.889666 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-mcw7g\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.889669 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-config\") pod \"dnsmasq-dns-78dd6ddcc-mcw7g\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.891524 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.907570 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbpx5\" (UniqueName: \"kubernetes.io/projected/0181220f-f87c-44b7-b544-a38e518fdaf6-kube-api-access-hbpx5\") pod \"dnsmasq-dns-78dd6ddcc-mcw7g\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
Nov 24 17:18:23 crc kubenswrapper[4760]: I1124 17:18:23.946192 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g"
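For each dnsmasq pod the volume reconciler runs in two phases, visible above: VerifyControllerAttachedVolume first (trivially satisfied for ConfigMap and projected volumes, which have no controller attach step), then MountVolume started / MountVolume.SetUp succeeded per volume. A compact sketch of that ordering with invented types, not kubelet source:

package main

import "fmt"

type vol struct{ name, plugin string }

// reconcile mirrors the two-phase sequence in the entries above: every volume
// is verified as attached before any per-pod SetUp is attempted.
func reconcile(pod string, vols []vol) {
	for _, v := range vols {
		// ConfigMap/projected/empty-dir plugins are not attachable, so this
		// phase succeeds without talking to any attach/detach controller.
		fmt.Printf("VerifyControllerAttachedVolume started for %q (%s) pod=%s\n", v.name, v.plugin, pod)
	}
	for _, v := range vols {
		fmt.Printf("MountVolume started for %q pod=%s\n", v.name, pod)
		fmt.Printf("MountVolume.SetUp succeeded for %q pod=%s\n", v.name, pod)
	}
}

func main() {
	reconcile("openstack/dnsmasq-dns-78dd6ddcc-mcw7g", []vol{
		{"config", "kubernetes.io/configmap"},
		{"dns-svc", "kubernetes.io/configmap"},
		{"kube-api-access-hbpx5", "kubernetes.io/projected"},
	})
}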
Nov 24 17:18:24 crc kubenswrapper[4760]: I1124 17:18:24.334530 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5wrpr"]
Nov 24 17:18:24 crc kubenswrapper[4760]: I1124 17:18:24.387374 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mcw7g"]
Nov 24 17:18:25 crc kubenswrapper[4760]: I1124 17:18:25.232823 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr" event={"ID":"28ae214e-91a1-4366-bad6-2afdfd94e760","Type":"ContainerStarted","Data":"b703e66beedf40363ad3184012a6927c69075261439ed4680f18fe6c6c4f8c2d"}
Nov 24 17:18:25 crc kubenswrapper[4760]: I1124 17:18:25.234667 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g" event={"ID":"0181220f-f87c-44b7-b544-a38e518fdaf6","Type":"ContainerStarted","Data":"8dcadadba5cf3138ea01e314c762eeaf12eb71631be4e3f3c7e392f4e594bfd0"}
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.544143 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5wrpr"]
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.568987 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-fzbmc"]
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.578381 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-fzbmc"]
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.578477 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.727813 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl24z\" (UniqueName: \"kubernetes.io/projected/82cf1c78-b285-4d26-9d47-693dbef6b473-kube-api-access-tl24z\") pod \"dnsmasq-dns-666b6646f7-fzbmc\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.727856 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-dns-svc\") pod \"dnsmasq-dns-666b6646f7-fzbmc\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.727873 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-config\") pod \"dnsmasq-dns-666b6646f7-fzbmc\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.829086 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl24z\" (UniqueName: \"kubernetes.io/projected/82cf1c78-b285-4d26-9d47-693dbef6b473-kube-api-access-tl24z\") pod \"dnsmasq-dns-666b6646f7-fzbmc\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.829139 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-dns-svc\") pod \"dnsmasq-dns-666b6646f7-fzbmc\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.829159 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-config\") pod \"dnsmasq-dns-666b6646f7-fzbmc\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.830189 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-dns-svc\") pod \"dnsmasq-dns-666b6646f7-fzbmc\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.830387 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-config\") pod \"dnsmasq-dns-666b6646f7-fzbmc\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.848882 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl24z\" (UniqueName: \"kubernetes.io/projected/82cf1c78-b285-4d26-9d47-693dbef6b473-kube-api-access-tl24z\") pod \"dnsmasq-dns-666b6646f7-fzbmc\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.923047 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc"
Nov 24 17:18:26 crc kubenswrapper[4760]: I1124 17:18:26.954082 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mcw7g"]
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.023963 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-x75ct"]
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.025234 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.042713 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-x75ct"]
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.133888 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-config\") pod \"dnsmasq-dns-57d769cc4f-x75ct\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.133930 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhcc4\" (UniqueName: \"kubernetes.io/projected/b5fccb9a-e3e5-44ed-8dea-34814910b15c-kube-api-access-hhcc4\") pod \"dnsmasq-dns-57d769cc4f-x75ct\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.133961 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-x75ct\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.235841 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-config\") pod \"dnsmasq-dns-57d769cc4f-x75ct\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.235981 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhcc4\" (UniqueName: \"kubernetes.io/projected/b5fccb9a-e3e5-44ed-8dea-34814910b15c-kube-api-access-hhcc4\") pod \"dnsmasq-dns-57d769cc4f-x75ct\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.236181 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-x75ct\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.236973 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-config\") pod \"dnsmasq-dns-57d769cc4f-x75ct\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.237815 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-x75ct\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.261624 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhcc4\" (UniqueName: \"kubernetes.io/projected/b5fccb9a-e3e5-44ed-8dea-34814910b15c-kube-api-access-hhcc4\") pod \"dnsmasq-dns-57d769cc4f-x75ct\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.363927 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.545389 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-fzbmc"]
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.777534 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.786585 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.790244 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.790253 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.790305 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.790331 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.790415 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.790439 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.790660 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-jszzd"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.793161 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948513 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948552 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-config-data\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948615 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3a459f6d-ed01-4235-9062-4deb6ac9ccec-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948665 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55qbx\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-kube-api-access-55qbx\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948733 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948775 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948817 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948843 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948872 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3a459f6d-ed01-4235-9062-4deb6ac9ccec-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948910 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:27 crc kubenswrapper[4760]: I1124 17:18:27.948974 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050162 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050435 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-config-data\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
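Among rabbitmq-server-0's volumes, pod-info is a downward-api volume: the kubelet projects pod metadata into files the container reads at runtime. A consumer-side Go sketch follows; the mount path and file name are assumptions for illustration, since the real ones are defined by the pod spec's volume items and volumeMounts:

package main

import (
	"fmt"
	"os"
)

func main() {
	// e.g. a file projected from metadata.name by a downward-api volume;
	// /etc/pod-info/name is a hypothetical mount point, not taken from the log.
	b, err := os.ReadFile("/etc/pod-info/name")
	if err != nil {
		fmt.Println("pod-info not mounted here:", err)
		return
	}
	fmt.Println("running as pod:", string(b))
}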
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050467 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3a459f6d-ed01-4235-9062-4deb6ac9ccec-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050492 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55qbx\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-kube-api-access-55qbx\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050526 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050553 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050587 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050607 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050630 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3a459f6d-ed01-4235-9062-4deb6ac9ccec-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050670 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.050706 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.051159 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.051293 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.051967 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.052241 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-config-data\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.056285 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3a459f6d-ed01-4235-9062-4deb6ac9ccec-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.056458 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.056471 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.057286 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.064722 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.065160 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3a459f6d-ed01-4235-9062-4deb6ac9ccec-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.069632 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55qbx\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-kube-api-access-55qbx\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
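local-storage06-crc is the only rabbitmq-server-0 volume that passes through MountVolume.MountDevice (note the device mount path "/mnt/openstack/pv06") before the per-pod SetUp: for a local PersistentVolume the "device" is a directory prepared once per node, and SetUp then makes it visible inside the pod's volume path, typically via a bind mount. A sketch of the two stages; the pod path placeholder and the use of a bind mount command are illustrative assumptions, not kubelet source:

package main

import (
	"fmt"
	"os/exec"
)

// mountDevice represents the node-global stage: for a local PV the path
// already exists (e.g. /mnt/openstack/pv06), so this mostly records it.
func mountDevice(globalPath string) {
	fmt.Println("MountVolume.MountDevice succeeded, device mount path", globalPath)
}

// setUp represents the per-pod stage: bind the global path into the pod's
// volume directory so the container sees it.
func setUp(globalPath, podPath string) error {
	return exec.Command("mount", "--bind", globalPath, podPath).Run()
}

func main() {
	global := "/mnt/openstack/pv06"
	pod := "/var/lib/kubelet/pods/<uid>/volumes/kubernetes.io~local-volume/local-storage06-crc"
	mountDevice(global)
	if err := setUp(global, pod); err != nil {
		fmt.Println("SetUp failed (needs root and a real pod dir):", err)
	}
}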
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.077247 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.108686 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.174937 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.179251 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.181448 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.181589 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.181708 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.181989 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-5x4sv"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.182655 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.184719 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.184891 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.185040 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.259780 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.259833 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.259865 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.259887 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.259925 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9d3132aa-0715-4d60-840c-fca7d6fef37c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.259946 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.260042 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9d3132aa-0715-4d60-840c-fca7d6fef37c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.260080 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.260104 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.260123 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4sb8\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-kube-api-access-g4sb8\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.260158 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.361899 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.361957 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.361983 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4sb8\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-kube-api-access-g4sb8\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.362039 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.362066 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.362102 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.362129 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.362150 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.362194 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9d3132aa-0715-4d60-840c-fca7d6fef37c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.362219 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.362283 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9d3132aa-0715-4d60-840c-fca7d6fef37c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.362543 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.362828 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.363252 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.363415 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.363505 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.364426 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.368110 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9d3132aa-0715-4d60-840c-fca7d6fef37c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.368452 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9d3132aa-0715-4d60-840c-fca7d6fef37c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.368461 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.370017 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.382372 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4sb8\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-kube-api-access-g4sb8\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.385499 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:28 crc kubenswrapper[4760]: I1124 17:18:28.508579 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.549761 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.555451 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.560016 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.561292 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.561450 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-h5wf8"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.561712 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.562545 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.566457 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.683295 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.683353 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.683394 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.683426 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjzmc\" (UniqueName: \"kubernetes.io/projected/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-kube-api-access-bjzmc\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.683450 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.683483 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-kolla-config\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0"
Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.683513 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume
\"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-config-data-default\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.683536 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.784862 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.784960 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjzmc\" (UniqueName: \"kubernetes.io/projected/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-kube-api-access-bjzmc\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.784985 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.785050 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-kolla-config\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.785079 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-config-data-default\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.785101 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.785404 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.785954 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-kolla-config\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " 
pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.786046 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.786131 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.786250 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-config-data-default\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.786354 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.787393 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.789024 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.789714 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.807209 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.811582 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjzmc\" (UniqueName: \"kubernetes.io/projected/ad1c45c2-91fc-4d03-9778-1f8ac8b891e5-kube-api-access-bjzmc\") pod \"openstack-galera-0\" (UID: \"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5\") " pod="openstack/openstack-galera-0" Nov 24 17:18:29 crc kubenswrapper[4760]: I1124 17:18:29.878541 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 24 17:18:30 crc kubenswrapper[4760]: W1124 17:18:30.400274 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82cf1c78_b285_4d26_9d47_693dbef6b473.slice/crio-4793525f36b572a99a87c975fcdccc6224a1f5f49024be9a6fe619b8fc5cbac7 WatchSource:0}: Error finding container 4793525f36b572a99a87c975fcdccc6224a1f5f49024be9a6fe619b8fc5cbac7: Status 404 returned error can't find the container with id 4793525f36b572a99a87c975fcdccc6224a1f5f49024be9a6fe619b8fc5cbac7 Nov 24 17:18:30 crc kubenswrapper[4760]: I1124 17:18:30.404809 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:18:30 crc kubenswrapper[4760]: I1124 17:18:30.933426 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 24 17:18:30 crc kubenswrapper[4760]: I1124 17:18:30.936305 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:30 crc kubenswrapper[4760]: I1124 17:18:30.942342 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-48qht" Nov 24 17:18:30 crc kubenswrapper[4760]: I1124 17:18:30.947287 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 24 17:18:30 crc kubenswrapper[4760]: I1124 17:18:30.947593 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 24 17:18:30 crc kubenswrapper[4760]: I1124 17:18:30.949144 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 24 17:18:30 crc kubenswrapper[4760]: I1124 17:18:30.956330 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.039450 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e072af6f-796e-4c4c-b7fa-a36ad7b972be-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.039495 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dj2c\" (UniqueName: \"kubernetes.io/projected/e072af6f-796e-4c4c-b7fa-a36ad7b972be-kube-api-access-6dj2c\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.039524 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e072af6f-796e-4c4c-b7fa-a36ad7b972be-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.039567 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e072af6f-796e-4c4c-b7fa-a36ad7b972be-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: 
\"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.039593 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.039681 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e072af6f-796e-4c4c-b7fa-a36ad7b972be-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.039701 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e072af6f-796e-4c4c-b7fa-a36ad7b972be-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.039726 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e072af6f-796e-4c4c-b7fa-a36ad7b972be-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.141275 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e072af6f-796e-4c4c-b7fa-a36ad7b972be-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.141327 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.141386 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e072af6f-796e-4c4c-b7fa-a36ad7b972be-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.141412 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e072af6f-796e-4c4c-b7fa-a36ad7b972be-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.141440 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e072af6f-796e-4c4c-b7fa-a36ad7b972be-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " 
pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.141532 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e072af6f-796e-4c4c-b7fa-a36ad7b972be-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.141556 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dj2c\" (UniqueName: \"kubernetes.io/projected/e072af6f-796e-4c4c-b7fa-a36ad7b972be-kube-api-access-6dj2c\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.141587 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e072af6f-796e-4c4c-b7fa-a36ad7b972be-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.141702 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e072af6f-796e-4c4c-b7fa-a36ad7b972be-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.141891 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.142214 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e072af6f-796e-4c4c-b7fa-a36ad7b972be-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.143171 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e072af6f-796e-4c4c-b7fa-a36ad7b972be-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.143832 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e072af6f-796e-4c4c-b7fa-a36ad7b972be-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.148952 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e072af6f-796e-4c4c-b7fa-a36ad7b972be-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 
17:18:31.152307 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e072af6f-796e-4c4c-b7fa-a36ad7b972be-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.158287 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dj2c\" (UniqueName: \"kubernetes.io/projected/e072af6f-796e-4c4c-b7fa-a36ad7b972be-kube-api-access-6dj2c\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.182235 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e072af6f-796e-4c4c-b7fa-a36ad7b972be\") " pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.255433 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.309592 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.310653 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.314160 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-x48hb" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.314239 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.314168 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.321636 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.358660 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" event={"ID":"82cf1c78-b285-4d26-9d47-693dbef6b473","Type":"ContainerStarted","Data":"4793525f36b572a99a87c975fcdccc6224a1f5f49024be9a6fe619b8fc5cbac7"} Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.449355 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5645a4cb-e092-4b3f-a704-c3497f304e80-kolla-config\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.449404 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5645a4cb-e092-4b3f-a704-c3497f304e80-combined-ca-bundle\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.449496 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/5645a4cb-e092-4b3f-a704-c3497f304e80-memcached-tls-certs\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.449546 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mst4x\" (UniqueName: \"kubernetes.io/projected/5645a4cb-e092-4b3f-a704-c3497f304e80-kube-api-access-mst4x\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.449594 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5645a4cb-e092-4b3f-a704-c3497f304e80-config-data\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.551384 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mst4x\" (UniqueName: \"kubernetes.io/projected/5645a4cb-e092-4b3f-a704-c3497f304e80-kube-api-access-mst4x\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.551422 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5645a4cb-e092-4b3f-a704-c3497f304e80-config-data\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.551462 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5645a4cb-e092-4b3f-a704-c3497f304e80-kolla-config\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.551478 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5645a4cb-e092-4b3f-a704-c3497f304e80-combined-ca-bundle\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.551554 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/5645a4cb-e092-4b3f-a704-c3497f304e80-memcached-tls-certs\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.552505 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5645a4cb-e092-4b3f-a704-c3497f304e80-kolla-config\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.552574 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5645a4cb-e092-4b3f-a704-c3497f304e80-config-data\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.554767 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/5645a4cb-e092-4b3f-a704-c3497f304e80-memcached-tls-certs\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.555671 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5645a4cb-e092-4b3f-a704-c3497f304e80-combined-ca-bundle\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.582386 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mst4x\" (UniqueName: \"kubernetes.io/projected/5645a4cb-e092-4b3f-a704-c3497f304e80-kube-api-access-mst4x\") pod \"memcached-0\" (UID: \"5645a4cb-e092-4b3f-a704-c3497f304e80\") " pod="openstack/memcached-0" Nov 24 17:18:31 crc kubenswrapper[4760]: I1124 17:18:31.643780 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 24 17:18:33 crc kubenswrapper[4760]: I1124 17:18:33.214347 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 17:18:33 crc kubenswrapper[4760]: I1124 17:18:33.215807 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 17:18:33 crc kubenswrapper[4760]: I1124 17:18:33.219963 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-27xzg" Nov 24 17:18:33 crc kubenswrapper[4760]: I1124 17:18:33.227563 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 17:18:33 crc kubenswrapper[4760]: I1124 17:18:33.385061 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm5lg\" (UniqueName: \"kubernetes.io/projected/54671374-7f83-4a6d-98a2-c5371e84a5f7-kube-api-access-fm5lg\") pod \"kube-state-metrics-0\" (UID: \"54671374-7f83-4a6d-98a2-c5371e84a5f7\") " pod="openstack/kube-state-metrics-0" Nov 24 17:18:33 crc kubenswrapper[4760]: I1124 17:18:33.486614 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm5lg\" (UniqueName: \"kubernetes.io/projected/54671374-7f83-4a6d-98a2-c5371e84a5f7-kube-api-access-fm5lg\") pod \"kube-state-metrics-0\" (UID: \"54671374-7f83-4a6d-98a2-c5371e84a5f7\") " pod="openstack/kube-state-metrics-0" Nov 24 17:18:33 crc kubenswrapper[4760]: I1124 17:18:33.516489 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm5lg\" (UniqueName: \"kubernetes.io/projected/54671374-7f83-4a6d-98a2-c5371e84a5f7-kube-api-access-fm5lg\") pod \"kube-state-metrics-0\" (UID: \"54671374-7f83-4a6d-98a2-c5371e84a5f7\") " pod="openstack/kube-state-metrics-0" Nov 24 17:18:33 crc kubenswrapper[4760]: I1124 17:18:33.538519 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.606818 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-dl9cm"] Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.611765 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.614400 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-5dl4b" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.614440 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.614687 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.630242 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dl9cm"] Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.649491 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/39e10c47-4e85-46de-a754-3ee0245718d7-ovn-controller-tls-certs\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.649550 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/39e10c47-4e85-46de-a754-3ee0245718d7-var-run\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.649575 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39e10c47-4e85-46de-a754-3ee0245718d7-scripts\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.649634 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/39e10c47-4e85-46de-a754-3ee0245718d7-var-run-ovn\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.649669 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/39e10c47-4e85-46de-a754-3ee0245718d7-var-log-ovn\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.649820 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67qf8\" (UniqueName: \"kubernetes.io/projected/39e10c47-4e85-46de-a754-3ee0245718d7-kube-api-access-67qf8\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.649858 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e10c47-4e85-46de-a754-3ee0245718d7-combined-ca-bundle\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.667943 4760 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-bnfkl"] Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.669955 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.678319 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-bnfkl"] Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751167 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-var-log\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751522 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67qf8\" (UniqueName: \"kubernetes.io/projected/39e10c47-4e85-46de-a754-3ee0245718d7-kube-api-access-67qf8\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751542 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e10c47-4e85-46de-a754-3ee0245718d7-combined-ca-bundle\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751561 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-var-run\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751615 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f9bf53ae-4ba1-4619-b603-550b974e1970-scripts\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751633 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmmdl\" (UniqueName: \"kubernetes.io/projected/f9bf53ae-4ba1-4619-b603-550b974e1970-kube-api-access-zmmdl\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751657 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/39e10c47-4e85-46de-a754-3ee0245718d7-ovn-controller-tls-certs\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751705 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/39e10c47-4e85-46de-a754-3ee0245718d7-var-run\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc 
kubenswrapper[4760]: I1124 17:18:36.751727 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39e10c47-4e85-46de-a754-3ee0245718d7-scripts\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751770 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/39e10c47-4e85-46de-a754-3ee0245718d7-var-run-ovn\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751794 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-etc-ovs\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751821 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/39e10c47-4e85-46de-a754-3ee0245718d7-var-log-ovn\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.751843 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-var-lib\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.752659 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/39e10c47-4e85-46de-a754-3ee0245718d7-var-run\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.752735 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/39e10c47-4e85-46de-a754-3ee0245718d7-var-run-ovn\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.752754 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/39e10c47-4e85-46de-a754-3ee0245718d7-var-log-ovn\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.754324 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39e10c47-4e85-46de-a754-3ee0245718d7-scripts\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.761786 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e10c47-4e85-46de-a754-3ee0245718d7-combined-ca-bundle\") pod \"ovn-controller-dl9cm\" (UID: 
\"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.762944 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/39e10c47-4e85-46de-a754-3ee0245718d7-ovn-controller-tls-certs\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.769383 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67qf8\" (UniqueName: \"kubernetes.io/projected/39e10c47-4e85-46de-a754-3ee0245718d7-kube-api-access-67qf8\") pod \"ovn-controller-dl9cm\" (UID: \"39e10c47-4e85-46de-a754-3ee0245718d7\") " pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.853264 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-etc-ovs\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.853317 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-var-lib\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.853338 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-var-log\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.853362 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-var-run\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.853402 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f9bf53ae-4ba1-4619-b603-550b974e1970-scripts\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.853421 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmmdl\" (UniqueName: \"kubernetes.io/projected/f9bf53ae-4ba1-4619-b603-550b974e1970-kube-api-access-zmmdl\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.853878 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-var-log\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.853878 4760 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-etc-ovs\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.853957 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-var-run\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.854127 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f9bf53ae-4ba1-4619-b603-550b974e1970-var-lib\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.856695 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f9bf53ae-4ba1-4619-b603-550b974e1970-scripts\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.868544 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmmdl\" (UniqueName: \"kubernetes.io/projected/f9bf53ae-4ba1-4619-b603-550b974e1970-kube-api-access-zmmdl\") pod \"ovn-controller-ovs-bnfkl\" (UID: \"f9bf53ae-4ba1-4619-b603-550b974e1970\") " pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:36 crc kubenswrapper[4760]: I1124 17:18:36.938831 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:37 crc kubenswrapper[4760]: I1124 17:18:37.017702 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:40 crc kubenswrapper[4760]: E1124 17:18:40.012825 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 24 17:18:40 crc kubenswrapper[4760]: E1124 17:18:40.014536 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hbpx5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-mcw7g_openstack(0181220f-f87c-44b7-b544-a38e518fdaf6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 17:18:40 crc kubenswrapper[4760]: E1124 17:18:40.016272 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g" podUID="0181220f-f87c-44b7-b544-a38e518fdaf6" Nov 24 17:18:40 crc kubenswrapper[4760]: E1124 17:18:40.114670 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 24 17:18:40 crc kubenswrapper[4760]: E1124 17:18:40.115055 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lrjhp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-5wrpr_openstack(28ae214e-91a1-4366-bad6-2afdfd94e760): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 17:18:40 crc kubenswrapper[4760]: E1124 17:18:40.116403 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr" podUID="28ae214e-91a1-4366-bad6-2afdfd94e760" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.378479 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.381741 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.389437 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.389590 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.389665 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-tg45m" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.389600 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.389844 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.392104 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.457647 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.457687 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-x75ct"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.458944 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" event={"ID":"82cf1c78-b285-4d26-9d47-693dbef6b473","Type":"ContainerStarted","Data":"870672f03bf229476a9befb06991122787d83cbd6011067fb754cc5c2b4e9cdf"} Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.510723 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.510763 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.510781 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.510812 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-config\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.510835 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4k84j\" (UniqueName: \"kubernetes.io/projected/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-kube-api-access-4k84j\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " 
pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.510897 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.510921 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.510948 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.613089 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4k84j\" (UniqueName: \"kubernetes.io/projected/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-kube-api-access-4k84j\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.613625 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.613667 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.613702 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.613773 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.613815 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.613833 4760 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.613920 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-config\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.614762 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-config\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.626689 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.628895 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.629899 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.644112 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.648902 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.661106 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.666872 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.672858 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-hjb2x" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.673103 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.673815 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.673962 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.694968 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.696929 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.698990 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4k84j\" (UniqueName: \"kubernetes.io/projected/20f2f51e-4902-44f9-97d6-1ebf12c22ad6-kube-api-access-4k84j\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.722869 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.727158 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"20f2f51e-4902-44f9-97d6-1ebf12c22ad6\") " pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.734113 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.738065 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.739464 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.761309 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 24 17:18:40 crc kubenswrapper[4760]: W1124 17:18:40.767864 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a459f6d_ed01_4235_9062_4deb6ac9ccec.slice/crio-0cf61669b8e9c7b907848d7b48ca1faeb074a03ce6f11b16c73bddb014cbfeb0 WatchSource:0}: Error finding container 0cf61669b8e9c7b907848d7b48ca1faeb074a03ce6f11b16c73bddb014cbfeb0: Status 404 returned error can't find the container with id 0cf61669b8e9c7b907848d7b48ca1faeb074a03ce6f11b16c73bddb014cbfeb0 Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.816191 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgtl8\" (UniqueName: \"kubernetes.io/projected/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-kube-api-access-mgtl8\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.816336 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.816419 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.816501 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.817156 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.817267 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.817406 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: 
\"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.817463 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-config\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.915983 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.918601 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-config\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.918651 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgtl8\" (UniqueName: \"kubernetes.io/projected/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-kube-api-access-mgtl8\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.918693 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.918718 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.918739 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.918763 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.918797 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.918837 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: 
I1124 17:18:40.919929 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.920353 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.921577 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.922789 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-config\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.926430 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.931431 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.931491 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.942218 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dl9cm"] Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.947088 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgtl8\" (UniqueName: \"kubernetes.io/projected/b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b-kube-api-access-mgtl8\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: W1124 17:18:40.948744 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39e10c47_4e85_46de_a754_3ee0245718d7.slice/crio-db28310070d1e755dc1a141b4afdbeb0dc421df1fe7f723c5aa5b232832b2a88 WatchSource:0}: Error finding container db28310070d1e755dc1a141b4afdbeb0dc421df1fe7f723c5aa5b232832b2a88: Status 404 returned error can't find the container with id db28310070d1e755dc1a141b4afdbeb0dc421df1fe7f723c5aa5b232832b2a88 
Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.952949 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b\") " pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:40 crc kubenswrapper[4760]: I1124 17:18:40.997576 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.096631 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-bnfkl"] Nov 24 17:18:41 crc kubenswrapper[4760]: W1124 17:18:41.114640 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9bf53ae_4ba1_4619_b603_550b974e1970.slice/crio-c044da545134d09f8aa6d7227d22f8ebef2000b9c1ccafa96ccc7c58ebae7655 WatchSource:0}: Error finding container c044da545134d09f8aa6d7227d22f8ebef2000b9c1ccafa96ccc7c58ebae7655: Status 404 returned error can't find the container with id c044da545134d09f8aa6d7227d22f8ebef2000b9c1ccafa96ccc7c58ebae7655 Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.136653 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.171735 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.222776 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbpx5\" (UniqueName: \"kubernetes.io/projected/0181220f-f87c-44b7-b544-a38e518fdaf6-kube-api-access-hbpx5\") pod \"0181220f-f87c-44b7-b544-a38e518fdaf6\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.223144 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28ae214e-91a1-4366-bad6-2afdfd94e760-config\") pod \"28ae214e-91a1-4366-bad6-2afdfd94e760\" (UID: \"28ae214e-91a1-4366-bad6-2afdfd94e760\") " Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.223167 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-config\") pod \"0181220f-f87c-44b7-b544-a38e518fdaf6\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.223642 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28ae214e-91a1-4366-bad6-2afdfd94e760-config" (OuterVolumeSpecName: "config") pod "28ae214e-91a1-4366-bad6-2afdfd94e760" (UID: "28ae214e-91a1-4366-bad6-2afdfd94e760"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.223715 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-dns-svc\") pod \"0181220f-f87c-44b7-b544-a38e518fdaf6\" (UID: \"0181220f-f87c-44b7-b544-a38e518fdaf6\") " Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.223741 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrjhp\" (UniqueName: \"kubernetes.io/projected/28ae214e-91a1-4366-bad6-2afdfd94e760-kube-api-access-lrjhp\") pod \"28ae214e-91a1-4366-bad6-2afdfd94e760\" (UID: \"28ae214e-91a1-4366-bad6-2afdfd94e760\") " Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.223737 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-config" (OuterVolumeSpecName: "config") pod "0181220f-f87c-44b7-b544-a38e518fdaf6" (UID: "0181220f-f87c-44b7-b544-a38e518fdaf6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.224163 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28ae214e-91a1-4366-bad6-2afdfd94e760-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.224185 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.224979 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0181220f-f87c-44b7-b544-a38e518fdaf6" (UID: "0181220f-f87c-44b7-b544-a38e518fdaf6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.227695 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28ae214e-91a1-4366-bad6-2afdfd94e760-kube-api-access-lrjhp" (OuterVolumeSpecName: "kube-api-access-lrjhp") pod "28ae214e-91a1-4366-bad6-2afdfd94e760" (UID: "28ae214e-91a1-4366-bad6-2afdfd94e760"). InnerVolumeSpecName "kube-api-access-lrjhp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.227813 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0181220f-f87c-44b7-b544-a38e518fdaf6-kube-api-access-hbpx5" (OuterVolumeSpecName: "kube-api-access-hbpx5") pod "0181220f-f87c-44b7-b544-a38e518fdaf6" (UID: "0181220f-f87c-44b7-b544-a38e518fdaf6"). InnerVolumeSpecName "kube-api-access-hbpx5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.325849 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbpx5\" (UniqueName: \"kubernetes.io/projected/0181220f-f87c-44b7-b544-a38e518fdaf6-kube-api-access-hbpx5\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.325872 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0181220f-f87c-44b7-b544-a38e518fdaf6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.325881 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrjhp\" (UniqueName: \"kubernetes.io/projected/28ae214e-91a1-4366-bad6-2afdfd94e760-kube-api-access-lrjhp\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.432966 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 24 17:18:41 crc kubenswrapper[4760]: W1124 17:18:41.451702 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20f2f51e_4902_44f9_97d6_1ebf12c22ad6.slice/crio-3cdefe03cc6e333875674c72b459009381c74ec278ba60ea9b956e57148566be WatchSource:0}: Error finding container 3cdefe03cc6e333875674c72b459009381c74ec278ba60ea9b956e57148566be: Status 404 returned error can't find the container with id 3cdefe03cc6e333875674c72b459009381c74ec278ba60ea9b956e57148566be Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.473140 4760 generic.go:334] "Generic (PLEG): container finished" podID="82cf1c78-b285-4d26-9d47-693dbef6b473" containerID="870672f03bf229476a9befb06991122787d83cbd6011067fb754cc5c2b4e9cdf" exitCode=0 Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.480753 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bnfkl" event={"ID":"f9bf53ae-4ba1-4619-b603-550b974e1970","Type":"ContainerStarted","Data":"c044da545134d09f8aa6d7227d22f8ebef2000b9c1ccafa96ccc7c58ebae7655"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.480790 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" event={"ID":"82cf1c78-b285-4d26-9d47-693dbef6b473","Type":"ContainerDied","Data":"870672f03bf229476a9befb06991122787d83cbd6011067fb754cc5c2b4e9cdf"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.480802 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"20f2f51e-4902-44f9-97d6-1ebf12c22ad6","Type":"ContainerStarted","Data":"3cdefe03cc6e333875674c72b459009381c74ec278ba60ea9b956e57148566be"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.480814 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"54671374-7f83-4a6d-98a2-c5371e84a5f7","Type":"ContainerStarted","Data":"c38a2a8a988e4ce5dda78ac28a7b9863b28bf2ad9d50e92ee4fdd624dfce9018"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.480826 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5","Type":"ContainerStarted","Data":"6e9550f5d9ae565a160549ac02e1250826dffa79fa45feb5936c3d71507f890b"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.499796 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" 
event={"ID":"5645a4cb-e092-4b3f-a704-c3497f304e80","Type":"ContainerStarted","Data":"00ed2eaf398c995b1ac86f233ec79d7577016823b7b11bb2f49b7a151026bf38"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.507423 4760 generic.go:334] "Generic (PLEG): container finished" podID="b5fccb9a-e3e5-44ed-8dea-34814910b15c" containerID="682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e" exitCode=0 Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.507492 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" event={"ID":"b5fccb9a-e3e5-44ed-8dea-34814910b15c","Type":"ContainerDied","Data":"682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.507519 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" event={"ID":"b5fccb9a-e3e5-44ed-8dea-34814910b15c","Type":"ContainerStarted","Data":"7bd5a61e08b0565a139799c1394fd049f0419e95cb1a221028ac7c720bdd75b6"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.515879 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g" event={"ID":"0181220f-f87c-44b7-b544-a38e518fdaf6","Type":"ContainerDied","Data":"8dcadadba5cf3138ea01e314c762eeaf12eb71631be4e3f3c7e392f4e594bfd0"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.515987 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-mcw7g" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.524911 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dl9cm" event={"ID":"39e10c47-4e85-46de-a754-3ee0245718d7","Type":"ContainerStarted","Data":"db28310070d1e755dc1a141b4afdbeb0dc421df1fe7f723c5aa5b232832b2a88"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.552181 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9d3132aa-0715-4d60-840c-fca7d6fef37c","Type":"ContainerStarted","Data":"7e1a8f84326597411b1acf98e2d315900e1134e18b9f60fd3f868fe856e4d2d4"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.556781 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr" event={"ID":"28ae214e-91a1-4366-bad6-2afdfd94e760","Type":"ContainerDied","Data":"b703e66beedf40363ad3184012a6927c69075261439ed4680f18fe6c6c4f8c2d"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.556857 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5wrpr" Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.560621 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a459f6d-ed01-4235-9062-4deb6ac9ccec","Type":"ContainerStarted","Data":"0cf61669b8e9c7b907848d7b48ca1faeb074a03ce6f11b16c73bddb014cbfeb0"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.567637 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e072af6f-796e-4c4c-b7fa-a36ad7b972be","Type":"ContainerStarted","Data":"244ff7ffe64141b9baa88daa5ceb152a9a3583bb379529282e33a138fb6a3a67"} Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.586233 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mcw7g"] Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.593575 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mcw7g"] Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.611244 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5wrpr"] Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.616125 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5wrpr"] Nov 24 17:18:41 crc kubenswrapper[4760]: I1124 17:18:41.661834 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 24 17:18:41 crc kubenswrapper[4760]: W1124 17:18:41.824226 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4f78e44_1a5c_45c4_a9f5_5ca852e0fc0b.slice/crio-1a9de01dc2bc0e376fefb8f8384e8b8b120c6958ddd8d58f376c1a43bb691cb6 WatchSource:0}: Error finding container 1a9de01dc2bc0e376fefb8f8384e8b8b120c6958ddd8d58f376c1a43bb691cb6: Status 404 returned error can't find the container with id 1a9de01dc2bc0e376fefb8f8384e8b8b120c6958ddd8d58f376c1a43bb691cb6 Nov 24 17:18:42 crc kubenswrapper[4760]: I1124 17:18:42.578270 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" event={"ID":"82cf1c78-b285-4d26-9d47-693dbef6b473","Type":"ContainerStarted","Data":"77e25888158692331913231d04742d590461d3adba2d1d2197fdab76c90bb5c6"} Nov 24 17:18:42 crc kubenswrapper[4760]: I1124 17:18:42.578904 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" Nov 24 17:18:42 crc kubenswrapper[4760]: I1124 17:18:42.581163 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" event={"ID":"b5fccb9a-e3e5-44ed-8dea-34814910b15c","Type":"ContainerStarted","Data":"fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e"} Nov 24 17:18:42 crc kubenswrapper[4760]: I1124 17:18:42.581612 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" Nov 24 17:18:42 crc kubenswrapper[4760]: I1124 17:18:42.582626 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b","Type":"ContainerStarted","Data":"1a9de01dc2bc0e376fefb8f8384e8b8b120c6958ddd8d58f376c1a43bb691cb6"} Nov 24 17:18:42 crc kubenswrapper[4760]: I1124 17:18:42.613568 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" podStartSLOduration=6.877166108 
podStartE2EDuration="16.613552715s" podCreationTimestamp="2025-11-24 17:18:26 +0000 UTC" firstStartedPulling="2025-11-24 17:18:30.40455603 +0000 UTC m=+905.727437580" lastFinishedPulling="2025-11-24 17:18:40.140942637 +0000 UTC m=+915.463824187" observedRunningTime="2025-11-24 17:18:42.598676656 +0000 UTC m=+917.921558206" watchObservedRunningTime="2025-11-24 17:18:42.613552715 +0000 UTC m=+917.936434265" Nov 24 17:18:42 crc kubenswrapper[4760]: I1124 17:18:42.616049 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" podStartSLOduration=15.616042903 podStartE2EDuration="15.616042903s" podCreationTimestamp="2025-11-24 17:18:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:18:42.612574138 +0000 UTC m=+917.935455688" watchObservedRunningTime="2025-11-24 17:18:42.616042903 +0000 UTC m=+917.938924453" Nov 24 17:18:43 crc kubenswrapper[4760]: I1124 17:18:43.477305 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0181220f-f87c-44b7-b544-a38e518fdaf6" path="/var/lib/kubelet/pods/0181220f-f87c-44b7-b544-a38e518fdaf6/volumes" Nov 24 17:18:43 crc kubenswrapper[4760]: I1124 17:18:43.478288 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28ae214e-91a1-4366-bad6-2afdfd94e760" path="/var/lib/kubelet/pods/28ae214e-91a1-4366-bad6-2afdfd94e760/volumes" Nov 24 17:18:46 crc kubenswrapper[4760]: I1124 17:18:46.924328 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" Nov 24 17:18:47 crc kubenswrapper[4760]: I1124 17:18:47.367268 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" Nov 24 17:18:47 crc kubenswrapper[4760]: I1124 17:18:47.439794 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-fzbmc"] Nov 24 17:18:47 crc kubenswrapper[4760]: I1124 17:18:47.692914 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" podUID="82cf1c78-b285-4d26-9d47-693dbef6b473" containerName="dnsmasq-dns" containerID="cri-o://77e25888158692331913231d04742d590461d3adba2d1d2197fdab76c90bb5c6" gracePeriod=10 Nov 24 17:18:48 crc kubenswrapper[4760]: I1124 17:18:48.737884 4760 generic.go:334] "Generic (PLEG): container finished" podID="82cf1c78-b285-4d26-9d47-693dbef6b473" containerID="77e25888158692331913231d04742d590461d3adba2d1d2197fdab76c90bb5c6" exitCode=0 Nov 24 17:18:48 crc kubenswrapper[4760]: I1124 17:18:48.738075 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" event={"ID":"82cf1c78-b285-4d26-9d47-693dbef6b473","Type":"ContainerDied","Data":"77e25888158692331913231d04742d590461d3adba2d1d2197fdab76c90bb5c6"} Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.006778 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.052494 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-dns-svc\") pod \"82cf1c78-b285-4d26-9d47-693dbef6b473\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.052588 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tl24z\" (UniqueName: \"kubernetes.io/projected/82cf1c78-b285-4d26-9d47-693dbef6b473-kube-api-access-tl24z\") pod \"82cf1c78-b285-4d26-9d47-693dbef6b473\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.052658 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-config\") pod \"82cf1c78-b285-4d26-9d47-693dbef6b473\" (UID: \"82cf1c78-b285-4d26-9d47-693dbef6b473\") " Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.058361 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82cf1c78-b285-4d26-9d47-693dbef6b473-kube-api-access-tl24z" (OuterVolumeSpecName: "kube-api-access-tl24z") pod "82cf1c78-b285-4d26-9d47-693dbef6b473" (UID: "82cf1c78-b285-4d26-9d47-693dbef6b473"). InnerVolumeSpecName "kube-api-access-tl24z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.091590 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-config" (OuterVolumeSpecName: "config") pod "82cf1c78-b285-4d26-9d47-693dbef6b473" (UID: "82cf1c78-b285-4d26-9d47-693dbef6b473"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.093800 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "82cf1c78-b285-4d26-9d47-693dbef6b473" (UID: "82cf1c78-b285-4d26-9d47-693dbef6b473"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.154771 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.154817 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tl24z\" (UniqueName: \"kubernetes.io/projected/82cf1c78-b285-4d26-9d47-693dbef6b473-kube-api-access-tl24z\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.154833 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82cf1c78-b285-4d26-9d47-693dbef6b473-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.744705 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"5645a4cb-e092-4b3f-a704-c3497f304e80","Type":"ContainerStarted","Data":"042d8e97dd809b3ea8c12f2ffbd7f8f5d18a045d1224774f07b78f658e70c2be"} Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.745290 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.748060 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e072af6f-796e-4c4c-b7fa-a36ad7b972be","Type":"ContainerStarted","Data":"c38650529ff7bb1f652440b4d7828f1b384cd4c7c68660e1524bf0e30248c1eb"} Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.751284 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b","Type":"ContainerStarted","Data":"8c16e69c37e593ec9c980512aa784bf7f86cb81346ab403552879ff03dbbbf78"} Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.753023 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" event={"ID":"82cf1c78-b285-4d26-9d47-693dbef6b473","Type":"ContainerDied","Data":"4793525f36b572a99a87c975fcdccc6224a1f5f49024be9a6fe619b8fc5cbac7"} Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.753052 4760 scope.go:117] "RemoveContainer" containerID="77e25888158692331913231d04742d590461d3adba2d1d2197fdab76c90bb5c6" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.753136 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-fzbmc" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.761960 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=11.003374385 podStartE2EDuration="18.761942761s" podCreationTimestamp="2025-11-24 17:18:31 +0000 UTC" firstStartedPulling="2025-11-24 17:18:40.796586848 +0000 UTC m=+916.119468388" lastFinishedPulling="2025-11-24 17:18:48.555155174 +0000 UTC m=+923.878036764" observedRunningTime="2025-11-24 17:18:49.758678041 +0000 UTC m=+925.081559591" watchObservedRunningTime="2025-11-24 17:18:49.761942761 +0000 UTC m=+925.084824311" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.772447 4760 scope.go:117] "RemoveContainer" containerID="870672f03bf229476a9befb06991122787d83cbd6011067fb754cc5c2b4e9cdf" Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.774904 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-fzbmc"] Nov 24 17:18:49 crc kubenswrapper[4760]: I1124 17:18:49.779911 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-fzbmc"] Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.762641 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5","Type":"ContainerStarted","Data":"fdeae517b2036cdb2932683ae07ffc79c3458e7e788d08f87bfa638a205b339f"} Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.763992 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a459f6d-ed01-4235-9062-4deb6ac9ccec","Type":"ContainerStarted","Data":"694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4"} Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.766832 4760 generic.go:334] "Generic (PLEG): container finished" podID="f9bf53ae-4ba1-4619-b603-550b974e1970" containerID="9a1f045e893312800bb9783eb63aa9ee1c06969e62247fd79ea1553f57c774ee" exitCode=0 Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.766899 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bnfkl" event={"ID":"f9bf53ae-4ba1-4619-b603-550b974e1970","Type":"ContainerDied","Data":"9a1f045e893312800bb9783eb63aa9ee1c06969e62247fd79ea1553f57c774ee"} Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.769404 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dl9cm" event={"ID":"39e10c47-4e85-46de-a754-3ee0245718d7","Type":"ContainerStarted","Data":"ac8a94412ef4cbd2a002f452d97560b128646bab9418b5ed25921cf0674cb5b2"} Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.769730 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-dl9cm" Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.771965 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"20f2f51e-4902-44f9-97d6-1ebf12c22ad6","Type":"ContainerStarted","Data":"64c73ebdb26511af983320b8a08e83863d6074fc790c5a8801bb4b0ee831bae5"} Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.774627 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9d3132aa-0715-4d60-840c-fca7d6fef37c","Type":"ContainerStarted","Data":"ae3c541e7631907d00510787756cfe3edbe148432975656cb203a8e3db203fd5"} Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.776325 4760 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/kube-state-metrics-0" event={"ID":"54671374-7f83-4a6d-98a2-c5371e84a5f7","Type":"ContainerStarted","Data":"816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc"} Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.841809 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-dl9cm" podStartSLOduration=6.806532292 podStartE2EDuration="14.841790317s" podCreationTimestamp="2025-11-24 17:18:36 +0000 UTC" firstStartedPulling="2025-11-24 17:18:40.955733194 +0000 UTC m=+916.278614744" lastFinishedPulling="2025-11-24 17:18:48.990991219 +0000 UTC m=+924.313872769" observedRunningTime="2025-11-24 17:18:50.827687139 +0000 UTC m=+926.150568699" watchObservedRunningTime="2025-11-24 17:18:50.841790317 +0000 UTC m=+926.164671867" Nov 24 17:18:50 crc kubenswrapper[4760]: I1124 17:18:50.881361 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=9.248804245 podStartE2EDuration="17.881346325s" podCreationTimestamp="2025-11-24 17:18:33 +0000 UTC" firstStartedPulling="2025-11-24 17:18:40.786162771 +0000 UTC m=+916.109044321" lastFinishedPulling="2025-11-24 17:18:49.418704811 +0000 UTC m=+924.741586401" observedRunningTime="2025-11-24 17:18:50.879677849 +0000 UTC m=+926.202559399" watchObservedRunningTime="2025-11-24 17:18:50.881346325 +0000 UTC m=+926.204227875" Nov 24 17:18:51 crc kubenswrapper[4760]: I1124 17:18:51.479105 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82cf1c78-b285-4d26-9d47-693dbef6b473" path="/var/lib/kubelet/pods/82cf1c78-b285-4d26-9d47-693dbef6b473/volumes" Nov 24 17:18:51 crc kubenswrapper[4760]: I1124 17:18:51.785627 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bnfkl" event={"ID":"f9bf53ae-4ba1-4619-b603-550b974e1970","Type":"ContainerStarted","Data":"7ff685295a805943ff0b31c6a230dab49af9f75a1bba1e65086c262124aec63e"} Nov 24 17:18:51 crc kubenswrapper[4760]: I1124 17:18:51.785962 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bnfkl" event={"ID":"f9bf53ae-4ba1-4619-b603-550b974e1970","Type":"ContainerStarted","Data":"904c7cf338cfb39e413e172649524bbb6d1606abd0b0110cb9767d1d9ae5155d"} Nov 24 17:18:51 crc kubenswrapper[4760]: I1124 17:18:51.786615 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 24 17:18:51 crc kubenswrapper[4760]: I1124 17:18:51.786643 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:51 crc kubenswrapper[4760]: I1124 17:18:51.806221 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-bnfkl" podStartSLOduration=8.154268486 podStartE2EDuration="15.80620448s" podCreationTimestamp="2025-11-24 17:18:36 +0000 UTC" firstStartedPulling="2025-11-24 17:18:41.11703572 +0000 UTC m=+916.439917270" lastFinishedPulling="2025-11-24 17:18:48.768971704 +0000 UTC m=+924.091853264" observedRunningTime="2025-11-24 17:18:51.803045223 +0000 UTC m=+927.125926773" watchObservedRunningTime="2025-11-24 17:18:51.80620448 +0000 UTC m=+927.129086030" Nov 24 17:18:52 crc kubenswrapper[4760]: I1124 17:18:52.018930 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:18:53 crc kubenswrapper[4760]: I1124 17:18:53.805502 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovsdbserver-nb-0" event={"ID":"20f2f51e-4902-44f9-97d6-1ebf12c22ad6","Type":"ContainerStarted","Data":"99bd2aaeb9a39218ba5ba76ad9193bb9aec7651cdfe512901a096f7fcdd6d81d"} Nov 24 17:18:53 crc kubenswrapper[4760]: I1124 17:18:53.807979 4760 generic.go:334] "Generic (PLEG): container finished" podID="ad1c45c2-91fc-4d03-9778-1f8ac8b891e5" containerID="fdeae517b2036cdb2932683ae07ffc79c3458e7e788d08f87bfa638a205b339f" exitCode=0 Nov 24 17:18:53 crc kubenswrapper[4760]: I1124 17:18:53.808077 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5","Type":"ContainerDied","Data":"fdeae517b2036cdb2932683ae07ffc79c3458e7e788d08f87bfa638a205b339f"} Nov 24 17:18:53 crc kubenswrapper[4760]: I1124 17:18:53.810255 4760 generic.go:334] "Generic (PLEG): container finished" podID="e072af6f-796e-4c4c-b7fa-a36ad7b972be" containerID="c38650529ff7bb1f652440b4d7828f1b384cd4c7c68660e1524bf0e30248c1eb" exitCode=0 Nov 24 17:18:53 crc kubenswrapper[4760]: I1124 17:18:53.810327 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e072af6f-796e-4c4c-b7fa-a36ad7b972be","Type":"ContainerDied","Data":"c38650529ff7bb1f652440b4d7828f1b384cd4c7c68660e1524bf0e30248c1eb"} Nov 24 17:18:53 crc kubenswrapper[4760]: I1124 17:18:53.817437 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b","Type":"ContainerStarted","Data":"2654dd8923d26d7045c8ae4afce1a3d608f5cd8318bad06d8a61e0967c400737"} Nov 24 17:18:53 crc kubenswrapper[4760]: I1124 17:18:53.852398 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=3.526658366 podStartE2EDuration="14.852333919s" podCreationTimestamp="2025-11-24 17:18:39 +0000 UTC" firstStartedPulling="2025-11-24 17:18:41.454736777 +0000 UTC m=+916.777618327" lastFinishedPulling="2025-11-24 17:18:52.78041232 +0000 UTC m=+928.103293880" observedRunningTime="2025-11-24 17:18:53.832794042 +0000 UTC m=+929.155675622" watchObservedRunningTime="2025-11-24 17:18:53.852333919 +0000 UTC m=+929.175215499" Nov 24 17:18:53 crc kubenswrapper[4760]: I1124 17:18:53.872177 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=3.939186129 podStartE2EDuration="14.872153064s" podCreationTimestamp="2025-11-24 17:18:39 +0000 UTC" firstStartedPulling="2025-11-24 17:18:41.840291849 +0000 UTC m=+917.163173399" lastFinishedPulling="2025-11-24 17:18:52.773258784 +0000 UTC m=+928.096140334" observedRunningTime="2025-11-24 17:18:53.865402349 +0000 UTC m=+929.188283969" watchObservedRunningTime="2025-11-24 17:18:53.872153064 +0000 UTC m=+929.195034614" Nov 24 17:18:54 crc kubenswrapper[4760]: I1124 17:18:54.828886 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ad1c45c2-91fc-4d03-9778-1f8ac8b891e5","Type":"ContainerStarted","Data":"ac61c93fe8c76987f2d55fe3aac8fb70c638f9ac12f6e53598542a2c44458581"} Nov 24 17:18:54 crc kubenswrapper[4760]: I1124 17:18:54.831367 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e072af6f-796e-4c4c-b7fa-a36ad7b972be","Type":"ContainerStarted","Data":"a3167f163e297fbdc91553607fdce64ae6bfb8ee88ebe29fa5597608ffef6600"} Nov 24 17:18:54 crc kubenswrapper[4760]: I1124 17:18:54.862700 4760 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=18.796722296 podStartE2EDuration="26.862676565s" podCreationTimestamp="2025-11-24 17:18:28 +0000 UTC" firstStartedPulling="2025-11-24 17:18:40.925870383 +0000 UTC m=+916.248751933" lastFinishedPulling="2025-11-24 17:18:48.991824652 +0000 UTC m=+924.314706202" observedRunningTime="2025-11-24 17:18:54.855362034 +0000 UTC m=+930.178243584" watchObservedRunningTime="2025-11-24 17:18:54.862676565 +0000 UTC m=+930.185558125" Nov 24 17:18:55 crc kubenswrapper[4760]: I1124 17:18:55.738620 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:55 crc kubenswrapper[4760]: I1124 17:18:55.738798 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:55 crc kubenswrapper[4760]: I1124 17:18:55.814807 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:55 crc kubenswrapper[4760]: I1124 17:18:55.858113 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=18.889335393 podStartE2EDuration="26.858082259s" podCreationTimestamp="2025-11-24 17:18:29 +0000 UTC" firstStartedPulling="2025-11-24 17:18:40.800846715 +0000 UTC m=+916.123728265" lastFinishedPulling="2025-11-24 17:18:48.769593581 +0000 UTC m=+924.092475131" observedRunningTime="2025-11-24 17:18:54.88430605 +0000 UTC m=+930.207187600" watchObservedRunningTime="2025-11-24 17:18:55.858082259 +0000 UTC m=+931.180963849" Nov 24 17:18:55 crc kubenswrapper[4760]: I1124 17:18:55.926286 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 24 17:18:55 crc kubenswrapper[4760]: I1124 17:18:55.998260 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:55 crc kubenswrapper[4760]: I1124 17:18:55.998316 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.047555 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.181837 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rv2w2"] Nov 24 17:18:56 crc kubenswrapper[4760]: E1124 17:18:56.182274 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82cf1c78-b285-4d26-9d47-693dbef6b473" containerName="dnsmasq-dns" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.182291 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="82cf1c78-b285-4d26-9d47-693dbef6b473" containerName="dnsmasq-dns" Nov 24 17:18:56 crc kubenswrapper[4760]: E1124 17:18:56.182325 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82cf1c78-b285-4d26-9d47-693dbef6b473" containerName="init" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.182333 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="82cf1c78-b285-4d26-9d47-693dbef6b473" containerName="init" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.182509 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="82cf1c78-b285-4d26-9d47-693dbef6b473" containerName="dnsmasq-dns" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.183499 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.187225 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.195533 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rv2w2"] Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.222208 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-sq2th"] Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.223092 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.228936 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.235749 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-sq2th"] Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.302648 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-config\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.302717 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.302795 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.303087 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klc4z\" (UniqueName: \"kubernetes.io/projected/260ca346-b6be-4d23-a928-c286a786a826-kube-api-access-klc4z\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.404654 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klc4z\" (UniqueName: \"kubernetes.io/projected/260ca346-b6be-4d23-a928-c286a786a826-kube-api-access-klc4z\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.404713 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-combined-ca-bundle\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: 
I1124 17:18:56.404747 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-ovs-rundir\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.404781 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-config\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.404796 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz22x\" (UniqueName: \"kubernetes.io/projected/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-kube-api-access-lz22x\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.405075 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.405150 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.405279 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-ovn-rundir\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.405341 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-config\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.405381 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.405621 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-config\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 
17:18:56.405821 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.405933 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.433667 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klc4z\" (UniqueName: \"kubernetes.io/projected/260ca346-b6be-4d23-a928-c286a786a826-kube-api-access-klc4z\") pod \"dnsmasq-dns-7fd796d7df-rv2w2\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.501468 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.506287 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.506345 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-ovn-rundir\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.506370 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-config\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.506400 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-combined-ca-bundle\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.506427 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-ovs-rundir\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.506454 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz22x\" (UniqueName: \"kubernetes.io/projected/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-kube-api-access-lz22x\") pod \"ovn-controller-metrics-sq2th\" (UID: 
\"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.506665 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-ovn-rundir\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.506734 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-ovs-rundir\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.507317 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-config\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.509512 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-combined-ca-bundle\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.519477 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.525541 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz22x\" (UniqueName: \"kubernetes.io/projected/f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a-kube-api-access-lz22x\") pod \"ovn-controller-metrics-sq2th\" (UID: \"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a\") " pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.541592 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-sq2th" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.602287 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rv2w2"] Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.638806 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-s8dmb"] Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.640023 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.642651 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.644742 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.663847 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-s8dmb"] Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.812851 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.814184 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.814242 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6xmp\" (UniqueName: \"kubernetes.io/projected/7373f65f-5ddc-428e-911c-fad3889ee3d2-kube-api-access-j6xmp\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.814261 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-config\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.814313 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.890925 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.917420 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.917478 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6xmp\" (UniqueName: \"kubernetes.io/projected/7373f65f-5ddc-428e-911c-fad3889ee3d2-kube-api-access-j6xmp\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 
17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.917500 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-config\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.917571 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.918657 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.919263 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.918202 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.945240 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.945255 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-config\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:56 crc kubenswrapper[4760]: I1124 17:18:56.962650 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6xmp\" (UniqueName: \"kubernetes.io/projected/7373f65f-5ddc-428e-911c-fad3889ee3d2-kube-api-access-j6xmp\") pod \"dnsmasq-dns-86db49b7ff-s8dmb\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.010922 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.034627 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rv2w2"] Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.096979 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.098302 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.100296 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.100353 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-fb7gq" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.101334 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.102347 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.128740 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.188199 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-sq2th"] Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.231298 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/53891de3-058b-46b8-b7f4-880ca70c1de3-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.231380 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/53891de3-058b-46b8-b7f4-880ca70c1de3-scripts\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.231403 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53891de3-058b-46b8-b7f4-880ca70c1de3-config\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.231424 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j657c\" (UniqueName: \"kubernetes.io/projected/53891de3-058b-46b8-b7f4-880ca70c1de3-kube-api-access-j657c\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.231445 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/53891de3-058b-46b8-b7f4-880ca70c1de3-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.231469 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53891de3-058b-46b8-b7f4-880ca70c1de3-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.231484 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/53891de3-058b-46b8-b7f4-880ca70c1de3-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.333466 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/53891de3-058b-46b8-b7f4-880ca70c1de3-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.333543 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/53891de3-058b-46b8-b7f4-880ca70c1de3-scripts\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.333569 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53891de3-058b-46b8-b7f4-880ca70c1de3-config\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.333591 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j657c\" (UniqueName: \"kubernetes.io/projected/53891de3-058b-46b8-b7f4-880ca70c1de3-kube-api-access-j657c\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.333608 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/53891de3-058b-46b8-b7f4-880ca70c1de3-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.333635 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53891de3-058b-46b8-b7f4-880ca70c1de3-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.333650 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/53891de3-058b-46b8-b7f4-880ca70c1de3-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.335643 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/53891de3-058b-46b8-b7f4-880ca70c1de3-scripts\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.336106 4760 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/53891de3-058b-46b8-b7f4-880ca70c1de3-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.336842 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53891de3-058b-46b8-b7f4-880ca70c1de3-config\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.338582 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/53891de3-058b-46b8-b7f4-880ca70c1de3-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.339919 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/53891de3-058b-46b8-b7f4-880ca70c1de3-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.340421 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53891de3-058b-46b8-b7f4-880ca70c1de3-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.355267 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j657c\" (UniqueName: \"kubernetes.io/projected/53891de3-058b-46b8-b7f4-880ca70c1de3-kube-api-access-j657c\") pod \"ovn-northd-0\" (UID: \"53891de3-058b-46b8-b7f4-880ca70c1de3\") " pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.433205 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.574338 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-s8dmb"] Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.858256 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.864794 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-sq2th" event={"ID":"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a","Type":"ContainerStarted","Data":"2fd4f066f051776c24e81302585f7d085d4b42c92ea373510ae27ca11cbf6d1d"} Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.864852 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-sq2th" event={"ID":"f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a","Type":"ContainerStarted","Data":"2b69131573186b77854239f0236aaabd7ef106e78d63a89ea2c30e2e5fca5e9c"} Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.870019 4760 generic.go:334] "Generic (PLEG): container finished" podID="7373f65f-5ddc-428e-911c-fad3889ee3d2" containerID="b3f152069b2a7dee7f5a13f9272837d91fe5084e0326380ac66714df9597a7fc" exitCode=0 Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.870125 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" event={"ID":"7373f65f-5ddc-428e-911c-fad3889ee3d2","Type":"ContainerDied","Data":"b3f152069b2a7dee7f5a13f9272837d91fe5084e0326380ac66714df9597a7fc"} Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.870153 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" event={"ID":"7373f65f-5ddc-428e-911c-fad3889ee3d2","Type":"ContainerStarted","Data":"c4bc96533505876eae73723876f4e2d63d3018214dfe196886330ca1212bc676"} Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.872753 4760 generic.go:334] "Generic (PLEG): container finished" podID="260ca346-b6be-4d23-a928-c286a786a826" containerID="09c099f20d26861df7ed7c2b92c87e9b36edc99160f0f48cc08e81276c52bde4" exitCode=0 Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.872782 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" event={"ID":"260ca346-b6be-4d23-a928-c286a786a826","Type":"ContainerDied","Data":"09c099f20d26861df7ed7c2b92c87e9b36edc99160f0f48cc08e81276c52bde4"} Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.872817 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" event={"ID":"260ca346-b6be-4d23-a928-c286a786a826","Type":"ContainerStarted","Data":"5c6db63e04f4f6d6e648c096c115cd43e9df465b0426e7aaefcb8adc5df7f243"} Nov 24 17:18:57 crc kubenswrapper[4760]: I1124 17:18:57.893714 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-sq2th" podStartSLOduration=1.89369369 podStartE2EDuration="1.89369369s" podCreationTimestamp="2025-11-24 17:18:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:18:57.88787113 +0000 UTC m=+933.210752720" watchObservedRunningTime="2025-11-24 17:18:57.89369369 +0000 UTC m=+933.216575240" Nov 24 17:18:57 crc kubenswrapper[4760]: W1124 17:18:57.894915 4760 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53891de3_058b_46b8_b7f4_880ca70c1de3.slice/crio-f2685700afc2b9d8420a273e08300755e4e0471f8dc9c600375ad74e577fa890 WatchSource:0}: Error finding container f2685700afc2b9d8420a273e08300755e4e0471f8dc9c600375ad74e577fa890: Status 404 returned error can't find the container with id f2685700afc2b9d8420a273e08300755e4e0471f8dc9c600375ad74e577fa890 Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.225725 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.355708 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-ovsdbserver-nb\") pod \"260ca346-b6be-4d23-a928-c286a786a826\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.355824 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klc4z\" (UniqueName: \"kubernetes.io/projected/260ca346-b6be-4d23-a928-c286a786a826-kube-api-access-klc4z\") pod \"260ca346-b6be-4d23-a928-c286a786a826\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.355857 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-dns-svc\") pod \"260ca346-b6be-4d23-a928-c286a786a826\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.355878 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-config\") pod \"260ca346-b6be-4d23-a928-c286a786a826\" (UID: \"260ca346-b6be-4d23-a928-c286a786a826\") " Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.360728 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/260ca346-b6be-4d23-a928-c286a786a826-kube-api-access-klc4z" (OuterVolumeSpecName: "kube-api-access-klc4z") pod "260ca346-b6be-4d23-a928-c286a786a826" (UID: "260ca346-b6be-4d23-a928-c286a786a826"). InnerVolumeSpecName "kube-api-access-klc4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.374597 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "260ca346-b6be-4d23-a928-c286a786a826" (UID: "260ca346-b6be-4d23-a928-c286a786a826"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.376515 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-config" (OuterVolumeSpecName: "config") pod "260ca346-b6be-4d23-a928-c286a786a826" (UID: "260ca346-b6be-4d23-a928-c286a786a826"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.377712 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "260ca346-b6be-4d23-a928-c286a786a826" (UID: "260ca346-b6be-4d23-a928-c286a786a826"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.457899 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.458248 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klc4z\" (UniqueName: \"kubernetes.io/projected/260ca346-b6be-4d23-a928-c286a786a826-kube-api-access-klc4z\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.458265 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.458277 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/260ca346-b6be-4d23-a928-c286a786a826-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.885319 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" event={"ID":"7373f65f-5ddc-428e-911c-fad3889ee3d2","Type":"ContainerStarted","Data":"3b51a8bb560efea714fe270d33737daa4a13828cdf9943af4976f22f39e39be6"} Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.885464 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.887828 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" event={"ID":"260ca346-b6be-4d23-a928-c286a786a826","Type":"ContainerDied","Data":"5c6db63e04f4f6d6e648c096c115cd43e9df465b0426e7aaefcb8adc5df7f243"} Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.887871 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-rv2w2" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.887874 4760 scope.go:117] "RemoveContainer" containerID="09c099f20d26861df7ed7c2b92c87e9b36edc99160f0f48cc08e81276c52bde4" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.889467 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"53891de3-058b-46b8-b7f4-880ca70c1de3","Type":"ContainerStarted","Data":"f2685700afc2b9d8420a273e08300755e4e0471f8dc9c600375ad74e577fa890"} Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.905559 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" podStartSLOduration=2.905543647 podStartE2EDuration="2.905543647s" podCreationTimestamp="2025-11-24 17:18:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:18:58.903430479 +0000 UTC m=+934.226312039" watchObservedRunningTime="2025-11-24 17:18:58.905543647 +0000 UTC m=+934.228425197" Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.965658 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rv2w2"] Nov 24 17:18:58 crc kubenswrapper[4760]: I1124 17:18:58.970410 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rv2w2"] Nov 24 17:18:59 crc kubenswrapper[4760]: I1124 17:18:59.480926 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="260ca346-b6be-4d23-a928-c286a786a826" path="/var/lib/kubelet/pods/260ca346-b6be-4d23-a928-c286a786a826/volumes" Nov 24 17:18:59 crc kubenswrapper[4760]: I1124 17:18:59.879720 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 24 17:18:59 crc kubenswrapper[4760]: I1124 17:18:59.880246 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 24 17:18:59 crc kubenswrapper[4760]: I1124 17:18:59.903300 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"53891de3-058b-46b8-b7f4-880ca70c1de3","Type":"ContainerStarted","Data":"1bdcbb2fd473bdd3487ccf6361cc1d26d309e6b70e8cde737652490c5136c028"} Nov 24 17:18:59 crc kubenswrapper[4760]: I1124 17:18:59.903372 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"53891de3-058b-46b8-b7f4-880ca70c1de3","Type":"ContainerStarted","Data":"d2e22968571a224ce7560a96de37ffa95bf7ade7f7e1cf79ee4aeb99ae3499f7"} Nov 24 17:18:59 crc kubenswrapper[4760]: I1124 17:18:59.903925 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 24 17:18:59 crc kubenswrapper[4760]: I1124 17:18:59.930282 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=1.8850523209999999 podStartE2EDuration="2.930257826s" podCreationTimestamp="2025-11-24 17:18:57 +0000 UTC" firstStartedPulling="2025-11-24 17:18:57.907427727 +0000 UTC m=+933.230309277" lastFinishedPulling="2025-11-24 17:18:58.952633232 +0000 UTC m=+934.275514782" observedRunningTime="2025-11-24 17:18:59.924633802 +0000 UTC m=+935.247515362" watchObservedRunningTime="2025-11-24 17:18:59.930257826 +0000 UTC m=+935.253139416" Nov 24 17:18:59 crc kubenswrapper[4760]: I1124 17:18:59.984192 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/openstack-galera-0" Nov 24 17:19:00 crc kubenswrapper[4760]: I1124 17:19:00.087614 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.255838 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.256254 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.284345 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-3d80-account-create-s4vnd"] Nov 24 17:19:01 crc kubenswrapper[4760]: E1124 17:19:01.290199 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="260ca346-b6be-4d23-a928-c286a786a826" containerName="init" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.290280 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="260ca346-b6be-4d23-a928-c286a786a826" containerName="init" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.290514 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="260ca346-b6be-4d23-a928-c286a786a826" containerName="init" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.291108 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3d80-account-create-s4vnd" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.293917 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.296928 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-3d80-account-create-s4vnd"] Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.338853 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-gvrlj"] Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.339868 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-gvrlj" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.346343 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-gvrlj"] Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.377572 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.416554 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8t9g\" (UniqueName: \"kubernetes.io/projected/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-kube-api-access-c8t9g\") pod \"keystone-3d80-account-create-s4vnd\" (UID: \"59f377ad-49a5-4ead-a3ab-10a3796a1cf5\") " pod="openstack/keystone-3d80-account-create-s4vnd" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.416642 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f38e58f-36e7-44fd-8df7-c4443e47d534-operator-scripts\") pod \"keystone-db-create-gvrlj\" (UID: \"2f38e58f-36e7-44fd-8df7-c4443e47d534\") " pod="openstack/keystone-db-create-gvrlj" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.416695 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jh2q4\" (UniqueName: \"kubernetes.io/projected/2f38e58f-36e7-44fd-8df7-c4443e47d534-kube-api-access-jh2q4\") pod \"keystone-db-create-gvrlj\" (UID: \"2f38e58f-36e7-44fd-8df7-c4443e47d534\") " pod="openstack/keystone-db-create-gvrlj" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.416730 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-operator-scripts\") pod \"keystone-3d80-account-create-s4vnd\" (UID: \"59f377ad-49a5-4ead-a3ab-10a3796a1cf5\") " pod="openstack/keystone-3d80-account-create-s4vnd" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.519157 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f38e58f-36e7-44fd-8df7-c4443e47d534-operator-scripts\") pod \"keystone-db-create-gvrlj\" (UID: \"2f38e58f-36e7-44fd-8df7-c4443e47d534\") " pod="openstack/keystone-db-create-gvrlj" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.519232 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jh2q4\" (UniqueName: \"kubernetes.io/projected/2f38e58f-36e7-44fd-8df7-c4443e47d534-kube-api-access-jh2q4\") pod \"keystone-db-create-gvrlj\" (UID: \"2f38e58f-36e7-44fd-8df7-c4443e47d534\") " pod="openstack/keystone-db-create-gvrlj" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.519265 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-operator-scripts\") pod \"keystone-3d80-account-create-s4vnd\" (UID: \"59f377ad-49a5-4ead-a3ab-10a3796a1cf5\") " pod="openstack/keystone-3d80-account-create-s4vnd" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.519995 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-operator-scripts\") pod 
\"keystone-3d80-account-create-s4vnd\" (UID: \"59f377ad-49a5-4ead-a3ab-10a3796a1cf5\") " pod="openstack/keystone-3d80-account-create-s4vnd" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.520174 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8t9g\" (UniqueName: \"kubernetes.io/projected/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-kube-api-access-c8t9g\") pod \"keystone-3d80-account-create-s4vnd\" (UID: \"59f377ad-49a5-4ead-a3ab-10a3796a1cf5\") " pod="openstack/keystone-3d80-account-create-s4vnd" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.522984 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f38e58f-36e7-44fd-8df7-c4443e47d534-operator-scripts\") pod \"keystone-db-create-gvrlj\" (UID: \"2f38e58f-36e7-44fd-8df7-c4443e47d534\") " pod="openstack/keystone-db-create-gvrlj" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.536928 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-5ktbp"] Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.538458 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5ktbp" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.541587 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jh2q4\" (UniqueName: \"kubernetes.io/projected/2f38e58f-36e7-44fd-8df7-c4443e47d534-kube-api-access-jh2q4\") pod \"keystone-db-create-gvrlj\" (UID: \"2f38e58f-36e7-44fd-8df7-c4443e47d534\") " pod="openstack/keystone-db-create-gvrlj" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.544638 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8t9g\" (UniqueName: \"kubernetes.io/projected/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-kube-api-access-c8t9g\") pod \"keystone-3d80-account-create-s4vnd\" (UID: \"59f377ad-49a5-4ead-a3ab-10a3796a1cf5\") " pod="openstack/keystone-3d80-account-create-s4vnd" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.550602 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-5ktbp"] Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.613774 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3d80-account-create-s4vnd" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.621226 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgbhc\" (UniqueName: \"kubernetes.io/projected/9460517b-9491-47de-bf02-3809732142c9-kube-api-access-wgbhc\") pod \"placement-db-create-5ktbp\" (UID: \"9460517b-9491-47de-bf02-3809732142c9\") " pod="openstack/placement-db-create-5ktbp" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.621285 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9460517b-9491-47de-bf02-3809732142c9-operator-scripts\") pod \"placement-db-create-5ktbp\" (UID: \"9460517b-9491-47de-bf02-3809732142c9\") " pod="openstack/placement-db-create-5ktbp" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.658565 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-gvrlj" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.667162 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-9575-account-create-8zpxz"] Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.668888 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9575-account-create-8zpxz" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.672026 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.675717 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9575-account-create-8zpxz"] Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.722546 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97150276-8639-4c2e-9324-9c3c840f58ec-operator-scripts\") pod \"placement-9575-account-create-8zpxz\" (UID: \"97150276-8639-4c2e-9324-9c3c840f58ec\") " pod="openstack/placement-9575-account-create-8zpxz" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.722629 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgbhc\" (UniqueName: \"kubernetes.io/projected/9460517b-9491-47de-bf02-3809732142c9-kube-api-access-wgbhc\") pod \"placement-db-create-5ktbp\" (UID: \"9460517b-9491-47de-bf02-3809732142c9\") " pod="openstack/placement-db-create-5ktbp" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.722677 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzlsr\" (UniqueName: \"kubernetes.io/projected/97150276-8639-4c2e-9324-9c3c840f58ec-kube-api-access-lzlsr\") pod \"placement-9575-account-create-8zpxz\" (UID: \"97150276-8639-4c2e-9324-9c3c840f58ec\") " pod="openstack/placement-9575-account-create-8zpxz" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.722765 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9460517b-9491-47de-bf02-3809732142c9-operator-scripts\") pod \"placement-db-create-5ktbp\" (UID: \"9460517b-9491-47de-bf02-3809732142c9\") " pod="openstack/placement-db-create-5ktbp" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.723784 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9460517b-9491-47de-bf02-3809732142c9-operator-scripts\") pod \"placement-db-create-5ktbp\" (UID: \"9460517b-9491-47de-bf02-3809732142c9\") " pod="openstack/placement-db-create-5ktbp" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.745069 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgbhc\" (UniqueName: \"kubernetes.io/projected/9460517b-9491-47de-bf02-3809732142c9-kube-api-access-wgbhc\") pod \"placement-db-create-5ktbp\" (UID: \"9460517b-9491-47de-bf02-3809732142c9\") " pod="openstack/placement-db-create-5ktbp" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.824511 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97150276-8639-4c2e-9324-9c3c840f58ec-operator-scripts\") pod \"placement-9575-account-create-8zpxz\" (UID: \"97150276-8639-4c2e-9324-9c3c840f58ec\") " 
pod="openstack/placement-9575-account-create-8zpxz" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.824565 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzlsr\" (UniqueName: \"kubernetes.io/projected/97150276-8639-4c2e-9324-9c3c840f58ec-kube-api-access-lzlsr\") pod \"placement-9575-account-create-8zpxz\" (UID: \"97150276-8639-4c2e-9324-9c3c840f58ec\") " pod="openstack/placement-9575-account-create-8zpxz" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.825573 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97150276-8639-4c2e-9324-9c3c840f58ec-operator-scripts\") pod \"placement-9575-account-create-8zpxz\" (UID: \"97150276-8639-4c2e-9324-9c3c840f58ec\") " pod="openstack/placement-9575-account-create-8zpxz" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.852154 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzlsr\" (UniqueName: \"kubernetes.io/projected/97150276-8639-4c2e-9324-9c3c840f58ec-kube-api-access-lzlsr\") pod \"placement-9575-account-create-8zpxz\" (UID: \"97150276-8639-4c2e-9324-9c3c840f58ec\") " pod="openstack/placement-9575-account-create-8zpxz" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.912237 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5ktbp" Nov 24 17:19:01 crc kubenswrapper[4760]: I1124 17:19:01.988231 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.088486 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9575-account-create-8zpxz" Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.123878 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-3d80-account-create-s4vnd"] Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.176150 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-gvrlj"] Nov 24 17:19:02 crc kubenswrapper[4760]: W1124 17:19:02.185447 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f38e58f_36e7_44fd_8df7_c4443e47d534.slice/crio-6fee8b69898beff22aa7e2d875b881288b732381b5cd08e486994f268a50fb03 WatchSource:0}: Error finding container 6fee8b69898beff22aa7e2d875b881288b732381b5cd08e486994f268a50fb03: Status 404 returned error can't find the container with id 6fee8b69898beff22aa7e2d875b881288b732381b5cd08e486994f268a50fb03 Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.338257 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-5ktbp"] Nov 24 17:19:02 crc kubenswrapper[4760]: W1124 17:19:02.343099 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9460517b_9491_47de_bf02_3809732142c9.slice/crio-4984158e32b56576e0887b65c9c7e3f9c68a0049f59d63882fc4ccc13e0c0947 WatchSource:0}: Error finding container 4984158e32b56576e0887b65c9c7e3f9c68a0049f59d63882fc4ccc13e0c0947: Status 404 returned error can't find the container with id 4984158e32b56576e0887b65c9c7e3f9c68a0049f59d63882fc4ccc13e0c0947 Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.520179 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/placement-9575-account-create-8zpxz"] Nov 24 17:19:02 crc kubenswrapper[4760]: W1124 17:19:02.540448 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97150276_8639_4c2e_9324_9c3c840f58ec.slice/crio-5de68ad69624b8fddee929fc448a21ae41f6bda541a15ed0e8a6f1638716706b WatchSource:0}: Error finding container 5de68ad69624b8fddee929fc448a21ae41f6bda541a15ed0e8a6f1638716706b: Status 404 returned error can't find the container with id 5de68ad69624b8fddee929fc448a21ae41f6bda541a15ed0e8a6f1638716706b Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.938561 4760 generic.go:334] "Generic (PLEG): container finished" podID="59f377ad-49a5-4ead-a3ab-10a3796a1cf5" containerID="62a259d59e77310b26333272635c8bc7757823d91b2f3b6099832eed6285f524" exitCode=0 Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.939553 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3d80-account-create-s4vnd" event={"ID":"59f377ad-49a5-4ead-a3ab-10a3796a1cf5","Type":"ContainerDied","Data":"62a259d59e77310b26333272635c8bc7757823d91b2f3b6099832eed6285f524"} Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.939591 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3d80-account-create-s4vnd" event={"ID":"59f377ad-49a5-4ead-a3ab-10a3796a1cf5","Type":"ContainerStarted","Data":"f2f2a6092113a6ee60a12fd7ea3bf0f04d09bd447a67c40486ee65a3ad08c7f8"} Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.943914 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9575-account-create-8zpxz" event={"ID":"97150276-8639-4c2e-9324-9c3c840f58ec","Type":"ContainerStarted","Data":"fc3d6defba9a4c29f557db5c8f8eb47a067b2f3bb6017c9608dd683671424f9c"} Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.943968 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9575-account-create-8zpxz" event={"ID":"97150276-8639-4c2e-9324-9c3c840f58ec","Type":"ContainerStarted","Data":"5de68ad69624b8fddee929fc448a21ae41f6bda541a15ed0e8a6f1638716706b"} Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.946161 4760 generic.go:334] "Generic (PLEG): container finished" podID="2f38e58f-36e7-44fd-8df7-c4443e47d534" containerID="db48b3903be23294c4e730f020ec3006d5ea827baca81e7a0abedac397c1b956" exitCode=0 Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.946343 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-gvrlj" event={"ID":"2f38e58f-36e7-44fd-8df7-c4443e47d534","Type":"ContainerDied","Data":"db48b3903be23294c4e730f020ec3006d5ea827baca81e7a0abedac397c1b956"} Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.946380 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-gvrlj" event={"ID":"2f38e58f-36e7-44fd-8df7-c4443e47d534","Type":"ContainerStarted","Data":"6fee8b69898beff22aa7e2d875b881288b732381b5cd08e486994f268a50fb03"} Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.948039 4760 generic.go:334] "Generic (PLEG): container finished" podID="9460517b-9491-47de-bf02-3809732142c9" containerID="5b25f3663d2b7cb6cfde38ab289c2a57aeb55d4e264e7c88b7684feb0e46591b" exitCode=0 Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.948138 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5ktbp" 
event={"ID":"9460517b-9491-47de-bf02-3809732142c9","Type":"ContainerDied","Data":"5b25f3663d2b7cb6cfde38ab289c2a57aeb55d4e264e7c88b7684feb0e46591b"} Nov 24 17:19:02 crc kubenswrapper[4760]: I1124 17:19:02.948197 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5ktbp" event={"ID":"9460517b-9491-47de-bf02-3809732142c9","Type":"ContainerStarted","Data":"4984158e32b56576e0887b65c9c7e3f9c68a0049f59d63882fc4ccc13e0c0947"} Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.002253 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-9575-account-create-8zpxz" podStartSLOduration=2.002228698 podStartE2EDuration="2.002228698s" podCreationTimestamp="2025-11-24 17:19:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:19:02.999314378 +0000 UTC m=+938.322195928" watchObservedRunningTime="2025-11-24 17:19:03.002228698 +0000 UTC m=+938.325110288" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.549866 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.711776 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-s8dmb"] Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.711998 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" podUID="7373f65f-5ddc-428e-911c-fad3889ee3d2" containerName="dnsmasq-dns" containerID="cri-o://3b51a8bb560efea714fe270d33737daa4a13828cdf9943af4976f22f39e39be6" gracePeriod=10 Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.716731 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.764579 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-tqdfj"] Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.765839 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-tqdfj"
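[annotation] The records above capture a rolling replacement of the dnsmasq-dns pod: the API server sends a DELETE for dnsmasq-dns-86db49b7ff-s8dmb, the kubelet kills its dnsmasq-dns container with gracePeriod=10 (SIGTERM first, SIGKILL once the grace period expires), and the successor pod dnsmasq-dns-698758b865-tqdfj is ADDed in the same second. A minimal client-go sketch of issuing such a delete with an explicit grace period; the kubeconfig path and error handling are illustrative assumptions, not taken from the log:

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Assumption: default kubeconfig; in-cluster config would work the same way.
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // Mirror the gracePeriod=10 seen above; the kubelet enforces the deadline.
        grace := int64(10)
        err = cs.CoreV1().Pods("openstack").Delete(context.TODO(),
            "dnsmasq-dns-86db49b7ff-s8dmb",
            metav1.DeleteOptions{GracePeriodSeconds: &grace})
        fmt.Println("delete requested, err:", err)
    }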
Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.781819 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-tqdfj"] Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.861614 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-config\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.861992 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7c4v7\" (UniqueName: \"kubernetes.io/projected/c9090ecc-9df5-4a09-8360-9d11fa34833f-kube-api-access-7c4v7\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.862132 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.862223 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-dns-svc\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.862258 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.963751 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.963833 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-dns-svc\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.963864 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.963928 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-config\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.963959 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7c4v7\" (UniqueName: \"kubernetes.io/projected/c9090ecc-9df5-4a09-8360-9d11fa34833f-kube-api-access-7c4v7\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.965365 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.965944 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-config\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.966454 4760 generic.go:334] "Generic (PLEG): container finished" podID="97150276-8639-4c2e-9324-9c3c840f58ec" containerID="fc3d6defba9a4c29f557db5c8f8eb47a067b2f3bb6017c9608dd683671424f9c" exitCode=0 Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.966525 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9575-account-create-8zpxz" event={"ID":"97150276-8639-4c2e-9324-9c3c840f58ec","Type":"ContainerDied","Data":"fc3d6defba9a4c29f557db5c8f8eb47a067b2f3bb6017c9608dd683671424f9c"} Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.966882 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-dns-svc\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.967713 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.993239 4760 generic.go:334] "Generic (PLEG): container finished" podID="7373f65f-5ddc-428e-911c-fad3889ee3d2" containerID="3b51a8bb560efea714fe270d33737daa4a13828cdf9943af4976f22f39e39be6" exitCode=0 Nov 24 17:19:03 crc kubenswrapper[4760]: I1124 17:19:03.993437 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" event={"ID":"7373f65f-5ddc-428e-911c-fad3889ee3d2","Type":"ContainerDied","Data":"3b51a8bb560efea714fe270d33737daa4a13828cdf9943af4976f22f39e39be6"} Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.003160 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7c4v7\" (UniqueName: \"kubernetes.io/projected/c9090ecc-9df5-4a09-8360-9d11fa34833f-kube-api-access-7c4v7\") pod 
\"dnsmasq-dns-698758b865-tqdfj\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") " pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.108954 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.309972 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.369971 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-nb\") pod \"7373f65f-5ddc-428e-911c-fad3889ee3d2\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.370049 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6xmp\" (UniqueName: \"kubernetes.io/projected/7373f65f-5ddc-428e-911c-fad3889ee3d2-kube-api-access-j6xmp\") pod \"7373f65f-5ddc-428e-911c-fad3889ee3d2\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.370232 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-sb\") pod \"7373f65f-5ddc-428e-911c-fad3889ee3d2\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.370305 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-config\") pod \"7373f65f-5ddc-428e-911c-fad3889ee3d2\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.370444 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-dns-svc\") pod \"7373f65f-5ddc-428e-911c-fad3889ee3d2\" (UID: \"7373f65f-5ddc-428e-911c-fad3889ee3d2\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.373734 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7373f65f-5ddc-428e-911c-fad3889ee3d2-kube-api-access-j6xmp" (OuterVolumeSpecName: "kube-api-access-j6xmp") pod "7373f65f-5ddc-428e-911c-fad3889ee3d2" (UID: "7373f65f-5ddc-428e-911c-fad3889ee3d2"). InnerVolumeSpecName "kube-api-access-j6xmp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.377964 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-gvrlj" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.420316 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-config" (OuterVolumeSpecName: "config") pod "7373f65f-5ddc-428e-911c-fad3889ee3d2" (UID: "7373f65f-5ddc-428e-911c-fad3889ee3d2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.428874 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7373f65f-5ddc-428e-911c-fad3889ee3d2" (UID: "7373f65f-5ddc-428e-911c-fad3889ee3d2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.434940 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7373f65f-5ddc-428e-911c-fad3889ee3d2" (UID: "7373f65f-5ddc-428e-911c-fad3889ee3d2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.447326 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7373f65f-5ddc-428e-911c-fad3889ee3d2" (UID: "7373f65f-5ddc-428e-911c-fad3889ee3d2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.471966 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jh2q4\" (UniqueName: \"kubernetes.io/projected/2f38e58f-36e7-44fd-8df7-c4443e47d534-kube-api-access-jh2q4\") pod \"2f38e58f-36e7-44fd-8df7-c4443e47d534\" (UID: \"2f38e58f-36e7-44fd-8df7-c4443e47d534\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.472124 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f38e58f-36e7-44fd-8df7-c4443e47d534-operator-scripts\") pod \"2f38e58f-36e7-44fd-8df7-c4443e47d534\" (UID: \"2f38e58f-36e7-44fd-8df7-c4443e47d534\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.472413 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.472426 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6xmp\" (UniqueName: \"kubernetes.io/projected/7373f65f-5ddc-428e-911c-fad3889ee3d2-kube-api-access-j6xmp\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.472437 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.472446 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.472455 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7373f65f-5ddc-428e-911c-fad3889ee3d2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.472528 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-3d80-account-create-s4vnd" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.476579 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f38e58f-36e7-44fd-8df7-c4443e47d534-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2f38e58f-36e7-44fd-8df7-c4443e47d534" (UID: "2f38e58f-36e7-44fd-8df7-c4443e47d534"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.479785 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5ktbp" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.480216 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f38e58f-36e7-44fd-8df7-c4443e47d534-kube-api-access-jh2q4" (OuterVolumeSpecName: "kube-api-access-jh2q4") pod "2f38e58f-36e7-44fd-8df7-c4443e47d534" (UID: "2f38e58f-36e7-44fd-8df7-c4443e47d534"). InnerVolumeSpecName "kube-api-access-jh2q4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.573764 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-operator-scripts\") pod \"59f377ad-49a5-4ead-a3ab-10a3796a1cf5\" (UID: \"59f377ad-49a5-4ead-a3ab-10a3796a1cf5\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.573910 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9460517b-9491-47de-bf02-3809732142c9-operator-scripts\") pod \"9460517b-9491-47de-bf02-3809732142c9\" (UID: \"9460517b-9491-47de-bf02-3809732142c9\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.573956 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgbhc\" (UniqueName: \"kubernetes.io/projected/9460517b-9491-47de-bf02-3809732142c9-kube-api-access-wgbhc\") pod \"9460517b-9491-47de-bf02-3809732142c9\" (UID: \"9460517b-9491-47de-bf02-3809732142c9\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.573987 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8t9g\" (UniqueName: \"kubernetes.io/projected/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-kube-api-access-c8t9g\") pod \"59f377ad-49a5-4ead-a3ab-10a3796a1cf5\" (UID: \"59f377ad-49a5-4ead-a3ab-10a3796a1cf5\") " Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.574265 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "59f377ad-49a5-4ead-a3ab-10a3796a1cf5" (UID: "59f377ad-49a5-4ead-a3ab-10a3796a1cf5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.574298 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jh2q4\" (UniqueName: \"kubernetes.io/projected/2f38e58f-36e7-44fd-8df7-c4443e47d534-kube-api-access-jh2q4\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.574310 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f38e58f-36e7-44fd-8df7-c4443e47d534-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.574625 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9460517b-9491-47de-bf02-3809732142c9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9460517b-9491-47de-bf02-3809732142c9" (UID: "9460517b-9491-47de-bf02-3809732142c9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.578490 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9460517b-9491-47de-bf02-3809732142c9-kube-api-access-wgbhc" (OuterVolumeSpecName: "kube-api-access-wgbhc") pod "9460517b-9491-47de-bf02-3809732142c9" (UID: "9460517b-9491-47de-bf02-3809732142c9"). InnerVolumeSpecName "kube-api-access-wgbhc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.579253 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-kube-api-access-c8t9g" (OuterVolumeSpecName: "kube-api-access-c8t9g") pod "59f377ad-49a5-4ead-a3ab-10a3796a1cf5" (UID: "59f377ad-49a5-4ead-a3ab-10a3796a1cf5"). InnerVolumeSpecName "kube-api-access-c8t9g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.675348 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9460517b-9491-47de-bf02-3809732142c9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.675389 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgbhc\" (UniqueName: \"kubernetes.io/projected/9460517b-9491-47de-bf02-3809732142c9-kube-api-access-wgbhc\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.675402 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8t9g\" (UniqueName: \"kubernetes.io/projected/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-kube-api-access-c8t9g\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.675415 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59f377ad-49a5-4ead-a3ab-10a3796a1cf5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:04 crc kubenswrapper[4760]: W1124 17:19:04.727418 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9090ecc_9df5_4a09_8360_9d11fa34833f.slice/crio-677dcc3df821bfa370c77abb09b1025cf5eaa94b54ecb78d02843352d6149a54 WatchSource:0}: Error finding container 677dcc3df821bfa370c77abb09b1025cf5eaa94b54ecb78d02843352d6149a54: Status 404 returned error can't find the container with id 677dcc3df821bfa370c77abb09b1025cf5eaa94b54ecb78d02843352d6149a54 Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.732402 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-tqdfj"] Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.879797 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 24 17:19:04 crc kubenswrapper[4760]: E1124 17:19:04.880342 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7373f65f-5ddc-428e-911c-fad3889ee3d2" containerName="dnsmasq-dns" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.880355 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="7373f65f-5ddc-428e-911c-fad3889ee3d2" containerName="dnsmasq-dns" Nov 24 17:19:04 crc kubenswrapper[4760]: E1124 17:19:04.880369 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9460517b-9491-47de-bf02-3809732142c9" containerName="mariadb-database-create" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.880375 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="9460517b-9491-47de-bf02-3809732142c9" containerName="mariadb-database-create" Nov 24 17:19:04 crc kubenswrapper[4760]: E1124 17:19:04.880400 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59f377ad-49a5-4ead-a3ab-10a3796a1cf5" containerName="mariadb-account-create" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.880406 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="59f377ad-49a5-4ead-a3ab-10a3796a1cf5" containerName="mariadb-account-create" Nov 24 17:19:04 crc kubenswrapper[4760]: E1124 17:19:04.880417 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7373f65f-5ddc-428e-911c-fad3889ee3d2" containerName="init" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.880423 4760 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="7373f65f-5ddc-428e-911c-fad3889ee3d2" containerName="init" Nov 24 17:19:04 crc kubenswrapper[4760]: E1124 17:19:04.880436 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f38e58f-36e7-44fd-8df7-c4443e47d534" containerName="mariadb-database-create" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.880442 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f38e58f-36e7-44fd-8df7-c4443e47d534" containerName="mariadb-database-create" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.880613 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="7373f65f-5ddc-428e-911c-fad3889ee3d2" containerName="dnsmasq-dns" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.880630 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="9460517b-9491-47de-bf02-3809732142c9" containerName="mariadb-database-create" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.880639 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="59f377ad-49a5-4ead-a3ab-10a3796a1cf5" containerName="mariadb-account-create" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.880650 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f38e58f-36e7-44fd-8df7-c4443e47d534" containerName="mariadb-database-create" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.885700 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.888379 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-cpflt" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.890217 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.890303 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.890216 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.914105 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.979315 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-lock\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.979376 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6gj8\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-kube-api-access-q6gj8\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.979438 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.979496 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-cache\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0"
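[annotation] The E-level "RemoveStaleState: removing container" and "Deleted CPUSet assignment" lines above look alarming but are housekeeping: while admitting swift-storage-0, the kubelet's cpu_manager and memory_manager drop per-container state left behind by pods that no longer exist (the finished db-create/account-create jobs and the replaced dnsmasq pod). A rough sketch of the pattern, under the assumption that state is keyed by (podUID, containerName):

    package main

    import "fmt"

    type key struct{ podUID, container string }

    func main() {
        // Hypothetical leftover assignments, keyed like the log messages above.
        state := map[key]string{
            {podUID: "7373f65f-5ddc-428e-911c-fad3889ee3d2", container: "dnsmasq-dns"}:             "cpuset 0-1",
            {podUID: "9460517b-9491-47de-bf02-3809732142c9", container: "mariadb-database-create"}: "cpuset 2",
        }
        // Pods the kubelet still tracks; neither UID above is among them.
        active := map[string]bool{"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9": true}
        for k := range state {
            if !active[k.podUID] {
                fmt.Printf("RemoveStaleState: removing container %s/%s\n", k.podUID, k.container)
                delete(state, k) // deleting while ranging over a map is safe in Go
            }
        }
    }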
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-cache\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:04 crc kubenswrapper[4760]: I1124 17:19:04.979518 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.018695 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" event={"ID":"7373f65f-5ddc-428e-911c-fad3889ee3d2","Type":"ContainerDied","Data":"c4bc96533505876eae73723876f4e2d63d3018214dfe196886330ca1212bc676"} Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.018755 4760 scope.go:117] "RemoveContainer" containerID="3b51a8bb560efea714fe270d33737daa4a13828cdf9943af4976f22f39e39be6" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.018721 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-s8dmb" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.020998 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-gvrlj" event={"ID":"2f38e58f-36e7-44fd-8df7-c4443e47d534","Type":"ContainerDied","Data":"6fee8b69898beff22aa7e2d875b881288b732381b5cd08e486994f268a50fb03"} Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.021132 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fee8b69898beff22aa7e2d875b881288b732381b5cd08e486994f268a50fb03" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.021179 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-gvrlj" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.033472 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5ktbp" event={"ID":"9460517b-9491-47de-bf02-3809732142c9","Type":"ContainerDied","Data":"4984158e32b56576e0887b65c9c7e3f9c68a0049f59d63882fc4ccc13e0c0947"} Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.033503 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4984158e32b56576e0887b65c9c7e3f9c68a0049f59d63882fc4ccc13e0c0947" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.033582 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-5ktbp" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.035618 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tqdfj" event={"ID":"c9090ecc-9df5-4a09-8360-9d11fa34833f","Type":"ContainerStarted","Data":"677dcc3df821bfa370c77abb09b1025cf5eaa94b54ecb78d02843352d6149a54"} Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.041024 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3d80-account-create-s4vnd" event={"ID":"59f377ad-49a5-4ead-a3ab-10a3796a1cf5","Type":"ContainerDied","Data":"f2f2a6092113a6ee60a12fd7ea3bf0f04d09bd447a67c40486ee65a3ad08c7f8"} Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.041056 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2f2a6092113a6ee60a12fd7ea3bf0f04d09bd447a67c40486ee65a3ad08c7f8" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.041062 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3d80-account-create-s4vnd" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.080841 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-lock\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.080884 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6gj8\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-kube-api-access-q6gj8\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.080910 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.080943 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-cache\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.080995 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: E1124 17:19:05.081070 4760 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 17:19:05 crc kubenswrapper[4760]: E1124 17:19:05.081085 4760 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 17:19:05 crc kubenswrapper[4760]: E1124 17:19:05.081130 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift podName:8b420e33-0bf9-4d88-b33e-b5ba674ea4d9 nodeName:}" failed. 
No retries permitted until 2025-11-24 17:19:05.581113328 +0000 UTC m=+940.903994878 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift") pod "swift-storage-0" (UID: "8b420e33-0bf9-4d88-b33e-b5ba674ea4d9") : configmap "swift-ring-files" not found Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.081269 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.081671 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-cache\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.085308 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-lock\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.113478 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.115596 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6gj8\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-kube-api-access-q6gj8\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.590144 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:05 crc kubenswrapper[4760]: E1124 17:19:05.590507 4760 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 17:19:05 crc kubenswrapper[4760]: E1124 17:19:05.590540 4760 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 17:19:05 crc kubenswrapper[4760]: E1124 17:19:05.590593 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift podName:8b420e33-0bf9-4d88-b33e-b5ba674ea4d9 nodeName:}" failed. No retries permitted until 2025-11-24 17:19:06.590576198 +0000 UTC m=+941.913457748 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift") pod "swift-storage-0" (UID: "8b420e33-0bf9-4d88-b33e-b5ba674ea4d9") : configmap "swift-ring-files" not found
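[annotation] The repeated MountVolume.SetUp failures for etc-swift are an ordering gap rather than data loss: swift-storage-0's projected volume references the ConfigMap openstack/swift-ring-files, which does not exist yet; it is presumably published by the swift-ring-rebalance job created moments later, after which the mount can succeed. While the ConfigMap is missing, the pod simply stays pending and the kubelet keeps retrying. A minimal client-go probe for the missing dependency (names taken from the log; the client setup is an assumption, as in the earlier sketch):

    package main

    import (
        "context"
        "fmt"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // The projected volume above fails with: configmap "swift-ring-files" not found.
        _, err = cs.CoreV1().ConfigMaps("openstack").Get(context.TODO(),
            "swift-ring-files", metav1.GetOptions{})
        switch {
        case apierrors.IsNotFound(err):
            fmt.Println("swift-ring-files not published yet; swift-storage-0 stays pending")
        case err != nil:
            panic(err)
        default:
            fmt.Println("swift-ring-files exists; the etc-swift mount should succeed on retry")
        }
    }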
Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.642375 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:19:05 crc kubenswrapper[4760]: I1124 17:19:05.642457 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.027253 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-zmrmg"] Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.031237 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-zmrmg"] Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.031276 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-zmrmg"] Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.031324 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-d9mv7"] Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.033230 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-d9mv7"] Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.033348 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.033980 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-zmrmg"
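[annotation] The Liveness probe failure above for machine-config-daemon-vgbxz is the kubelet's HTTP prober doing a GET against http://127.0.0.1:8798/health and getting connection refused; a single miss is only logged, and the container is restarted only after failureThreshold consecutive failures. A stripped-down equivalent of the check (the one-second timeout is an assumption, not the pod's configured value):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{Timeout: time.Second}
        resp, err := client.Get("http://127.0.0.1:8798/health")
        if err != nil {
            // The path taken in the log: "connect: connection refused".
            fmt.Println("Probe failed:", err)
            return
        }
        defer resp.Body.Close()
        // The kubelet treats any 2xx/3xx status as probe success.
        if resp.StatusCode >= 200 && resp.StatusCode < 400 {
            fmt.Println("probe ok:", resp.Status)
        } else {
            fmt.Println("probe failed:", resp.Status)
        }
    }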
Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.043853 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.044892 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.045055 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.066174 4760 scope.go:117] "RemoveContainer" containerID="b3f152069b2a7dee7f5a13f9272837d91fe5084e0326380ac66714df9597a7fc" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.098684 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-dispersionconf\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.098724 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-combined-ca-bundle\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.098775 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d0bcc362-6648-4630-b41b-610209865eea-etc-swift\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.098800 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-swiftconf\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.098834 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-scripts\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.098872 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shbm6\" (UniqueName: \"kubernetes.io/projected/d0bcc362-6648-4630-b41b-610209865eea-kube-api-access-shbm6\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.098904 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-ring-data-devices\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " 
pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.200432 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-dispersionconf\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.200479 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-combined-ca-bundle\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.200540 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d0bcc362-6648-4630-b41b-610209865eea-etc-swift\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.200568 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-swiftconf\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.200602 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-scripts\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.200641 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shbm6\" (UniqueName: \"kubernetes.io/projected/d0bcc362-6648-4630-b41b-610209865eea-kube-api-access-shbm6\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.200885 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-ring-data-devices\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.202358 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d0bcc362-6648-4630-b41b-610209865eea-etc-swift\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.202383 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-scripts\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.203572 4760 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-ring-data-devices\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.210612 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-combined-ca-bundle\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.210819 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-swiftconf\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.221879 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-dispersionconf\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.239057 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shbm6\" (UniqueName: \"kubernetes.io/projected/d0bcc362-6648-4630-b41b-610209865eea-kube-api-access-shbm6\") pod \"swift-ring-rebalance-d9mv7\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.315491 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-zmrmg" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.326849 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.330216 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9575-account-create-8zpxz" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.330750 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-s8dmb"] Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.351209 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-s8dmb"] Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.403753 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzlsr\" (UniqueName: \"kubernetes.io/projected/97150276-8639-4c2e-9324-9c3c840f58ec-kube-api-access-lzlsr\") pod \"97150276-8639-4c2e-9324-9c3c840f58ec\" (UID: \"97150276-8639-4c2e-9324-9c3c840f58ec\") " Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.403961 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97150276-8639-4c2e-9324-9c3c840f58ec-operator-scripts\") pod \"97150276-8639-4c2e-9324-9c3c840f58ec\" (UID: \"97150276-8639-4c2e-9324-9c3c840f58ec\") " Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.404719 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97150276-8639-4c2e-9324-9c3c840f58ec-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "97150276-8639-4c2e-9324-9c3c840f58ec" (UID: "97150276-8639-4c2e-9324-9c3c840f58ec"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.407623 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97150276-8639-4c2e-9324-9c3c840f58ec-kube-api-access-lzlsr" (OuterVolumeSpecName: "kube-api-access-lzlsr") pod "97150276-8639-4c2e-9324-9c3c840f58ec" (UID: "97150276-8639-4c2e-9324-9c3c840f58ec"). InnerVolumeSpecName "kube-api-access-lzlsr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.506702 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzlsr\" (UniqueName: \"kubernetes.io/projected/97150276-8639-4c2e-9324-9c3c840f58ec-kube-api-access-lzlsr\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.506732 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/97150276-8639-4c2e-9324-9c3c840f58ec-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.590687 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-d9mv7"] Nov 24 17:19:06 crc kubenswrapper[4760]: W1124 17:19:06.595875 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0bcc362_6648_4630_b41b_610209865eea.slice/crio-62ae92764f2d1142d90812b8df10459972459821b32bdc260ca55ac96ff1d74b WatchSource:0}: Error finding container 62ae92764f2d1142d90812b8df10459972459821b32bdc260ca55ac96ff1d74b: Status 404 returned error can't find the container with id 62ae92764f2d1142d90812b8df10459972459821b32bdc260ca55ac96ff1d74b Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.608880 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:06 crc kubenswrapper[4760]: E1124 17:19:06.609390 4760 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 17:19:06 crc kubenswrapper[4760]: E1124 17:19:06.609439 4760 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 17:19:06 crc kubenswrapper[4760]: E1124 17:19:06.609530 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift podName:8b420e33-0bf9-4d88-b33e-b5ba674ea4d9 nodeName:}" failed. No retries permitted until 2025-11-24 17:19:08.609501608 +0000 UTC m=+943.932383168 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift") pod "swift-storage-0" (UID: "8b420e33-0bf9-4d88-b33e-b5ba674ea4d9") : configmap "swift-ring-files" not found Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.868002 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-74m25"] Nov 24 17:19:06 crc kubenswrapper[4760]: E1124 17:19:06.868371 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97150276-8639-4c2e-9324-9c3c840f58ec" containerName="mariadb-account-create" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.868392 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="97150276-8639-4c2e-9324-9c3c840f58ec" containerName="mariadb-account-create" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.868630 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="97150276-8639-4c2e-9324-9c3c840f58ec" containerName="mariadb-account-create" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.869222 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-74m25" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.881463 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-74m25"] Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.913464 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5659f320-0add-4e47-a4d9-b0aedac33d73-operator-scripts\") pod \"glance-db-create-74m25\" (UID: \"5659f320-0add-4e47-a4d9-b0aedac33d73\") " pod="openstack/glance-db-create-74m25" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.913576 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gm9m5\" (UniqueName: \"kubernetes.io/projected/5659f320-0add-4e47-a4d9-b0aedac33d73-kube-api-access-gm9m5\") pod \"glance-db-create-74m25\" (UID: \"5659f320-0add-4e47-a4d9-b0aedac33d73\") " pod="openstack/glance-db-create-74m25" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.969142 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-0ac2-account-create-s59pm"] Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.970513 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0ac2-account-create-s59pm"
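[annotation] Reading the three etc-swift failures together, the durationBeforeRetry values go 500ms, then 1s, then 2s: the kubelet backs off failed volume operations exponentially (the "No retries permitted until ..." lines come from its nestedpendingoperations bookkeeping). A toy reproduction of the doubling with a stubbed mount; the attempt cap here is arbitrary:

    package main

    import (
        "fmt"
        "time"
    )

    // tryMount stands in for MountVolume.SetUp; it always fails here, like the
    // lookups above while the swift-ring-files ConfigMap is missing.
    func tryMount() error {
        return fmt.Errorf(`configmap "swift-ring-files" not found`)
    }

    func main() {
        delay := 500 * time.Millisecond // first durationBeforeRetry in the log
        for attempt := 1; attempt <= 3; attempt++ {
            if err := tryMount(); err != nil {
                fmt.Printf("attempt %d failed: %v; no retries permitted for %v\n",
                    attempt, err, delay)
                time.Sleep(delay)
                delay *= 2 // 500ms -> 1s -> 2s, matching the retries above
            }
        }
    }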
Need to start a new one" pod="openstack/glance-0ac2-account-create-s59pm" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.978110 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 24 17:19:06 crc kubenswrapper[4760]: I1124 17:19:06.993389 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-0ac2-account-create-s59pm"] Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.016500 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gm9m5\" (UniqueName: \"kubernetes.io/projected/5659f320-0add-4e47-a4d9-b0aedac33d73-kube-api-access-gm9m5\") pod \"glance-db-create-74m25\" (UID: \"5659f320-0add-4e47-a4d9-b0aedac33d73\") " pod="openstack/glance-db-create-74m25" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.016569 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmwmw\" (UniqueName: \"kubernetes.io/projected/925c6a06-e39d-42ff-abc8-a318552833b1-kube-api-access-qmwmw\") pod \"glance-0ac2-account-create-s59pm\" (UID: \"925c6a06-e39d-42ff-abc8-a318552833b1\") " pod="openstack/glance-0ac2-account-create-s59pm" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.016700 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5659f320-0add-4e47-a4d9-b0aedac33d73-operator-scripts\") pod \"glance-db-create-74m25\" (UID: \"5659f320-0add-4e47-a4d9-b0aedac33d73\") " pod="openstack/glance-db-create-74m25" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.016742 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/925c6a06-e39d-42ff-abc8-a318552833b1-operator-scripts\") pod \"glance-0ac2-account-create-s59pm\" (UID: \"925c6a06-e39d-42ff-abc8-a318552833b1\") " pod="openstack/glance-0ac2-account-create-s59pm" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.017964 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5659f320-0add-4e47-a4d9-b0aedac33d73-operator-scripts\") pod \"glance-db-create-74m25\" (UID: \"5659f320-0add-4e47-a4d9-b0aedac33d73\") " pod="openstack/glance-db-create-74m25" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.040696 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gm9m5\" (UniqueName: \"kubernetes.io/projected/5659f320-0add-4e47-a4d9-b0aedac33d73-kube-api-access-gm9m5\") pod \"glance-db-create-74m25\" (UID: \"5659f320-0add-4e47-a4d9-b0aedac33d73\") " pod="openstack/glance-db-create-74m25" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.100408 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9575-account-create-8zpxz" event={"ID":"97150276-8639-4c2e-9324-9c3c840f58ec","Type":"ContainerDied","Data":"5de68ad69624b8fddee929fc448a21ae41f6bda541a15ed0e8a6f1638716706b"} Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.100454 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5de68ad69624b8fddee929fc448a21ae41f6bda541a15ed0e8a6f1638716706b" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.100454 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9575-account-create-8zpxz" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.102202 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tqdfj" event={"ID":"c9090ecc-9df5-4a09-8360-9d11fa34833f","Type":"ContainerStarted","Data":"72a1828d7eeb280cd09c64874cdbe04be5bc19f6b8bc85331bcdd4ed7c261b65"} Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.103588 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-d9mv7" event={"ID":"d0bcc362-6648-4630-b41b-610209865eea","Type":"ContainerStarted","Data":"62ae92764f2d1142d90812b8df10459972459821b32bdc260ca55ac96ff1d74b"} Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.105532 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-zmrmg" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.118082 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmwmw\" (UniqueName: \"kubernetes.io/projected/925c6a06-e39d-42ff-abc8-a318552833b1-kube-api-access-qmwmw\") pod \"glance-0ac2-account-create-s59pm\" (UID: \"925c6a06-e39d-42ff-abc8-a318552833b1\") " pod="openstack/glance-0ac2-account-create-s59pm" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.118189 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/925c6a06-e39d-42ff-abc8-a318552833b1-operator-scripts\") pod \"glance-0ac2-account-create-s59pm\" (UID: \"925c6a06-e39d-42ff-abc8-a318552833b1\") " pod="openstack/glance-0ac2-account-create-s59pm" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.118791 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/925c6a06-e39d-42ff-abc8-a318552833b1-operator-scripts\") pod \"glance-0ac2-account-create-s59pm\" (UID: \"925c6a06-e39d-42ff-abc8-a318552833b1\") " pod="openstack/glance-0ac2-account-create-s59pm" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.138196 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmwmw\" (UniqueName: \"kubernetes.io/projected/925c6a06-e39d-42ff-abc8-a318552833b1-kube-api-access-qmwmw\") pod \"glance-0ac2-account-create-s59pm\" (UID: \"925c6a06-e39d-42ff-abc8-a318552833b1\") " pod="openstack/glance-0ac2-account-create-s59pm" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.147956 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-zmrmg"] Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.154803 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-zmrmg"] Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.201579 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-74m25" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.342318 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-0ac2-account-create-s59pm" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.476911 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7373f65f-5ddc-428e-911c-fad3889ee3d2" path="/var/lib/kubelet/pods/7373f65f-5ddc-428e-911c-fad3889ee3d2/volumes" Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.652057 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-74m25"] Nov 24 17:19:07 crc kubenswrapper[4760]: W1124 17:19:07.657141 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5659f320_0add_4e47_a4d9_b0aedac33d73.slice/crio-a93f0005dbb0fd3be9567d4538fc3b6c1e02a1bcdfe2ed8ab1cf315e8f50fd21 WatchSource:0}: Error finding container a93f0005dbb0fd3be9567d4538fc3b6c1e02a1bcdfe2ed8ab1cf315e8f50fd21: Status 404 returned error can't find the container with id a93f0005dbb0fd3be9567d4538fc3b6c1e02a1bcdfe2ed8ab1cf315e8f50fd21 Nov 24 17:19:07 crc kubenswrapper[4760]: I1124 17:19:07.842362 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-0ac2-account-create-s59pm"] Nov 24 17:19:07 crc kubenswrapper[4760]: W1124 17:19:07.845524 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod925c6a06_e39d_42ff_abc8_a318552833b1.slice/crio-f9b902f2a787cd7da6d0aa72a03cd1578339d18df53a52fe9bfc5aa89042e476 WatchSource:0}: Error finding container f9b902f2a787cd7da6d0aa72a03cd1578339d18df53a52fe9bfc5aa89042e476: Status 404 returned error can't find the container with id f9b902f2a787cd7da6d0aa72a03cd1578339d18df53a52fe9bfc5aa89042e476 Nov 24 17:19:08 crc kubenswrapper[4760]: I1124 17:19:08.115728 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0ac2-account-create-s59pm" event={"ID":"925c6a06-e39d-42ff-abc8-a318552833b1","Type":"ContainerStarted","Data":"f9b902f2a787cd7da6d0aa72a03cd1578339d18df53a52fe9bfc5aa89042e476"} Nov 24 17:19:08 crc kubenswrapper[4760]: I1124 17:19:08.117243 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-74m25" event={"ID":"5659f320-0add-4e47-a4d9-b0aedac33d73","Type":"ContainerStarted","Data":"a93f0005dbb0fd3be9567d4538fc3b6c1e02a1bcdfe2ed8ab1cf315e8f50fd21"} Nov 24 17:19:08 crc kubenswrapper[4760]: I1124 17:19:08.118969 4760 generic.go:334] "Generic (PLEG): container finished" podID="c9090ecc-9df5-4a09-8360-9d11fa34833f" containerID="72a1828d7eeb280cd09c64874cdbe04be5bc19f6b8bc85331bcdd4ed7c261b65" exitCode=0 Nov 24 17:19:08 crc kubenswrapper[4760]: I1124 17:19:08.119051 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tqdfj" event={"ID":"c9090ecc-9df5-4a09-8360-9d11fa34833f","Type":"ContainerDied","Data":"72a1828d7eeb280cd09c64874cdbe04be5bc19f6b8bc85331bcdd4ed7c261b65"} Nov 24 17:19:08 crc kubenswrapper[4760]: I1124 17:19:08.648880 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:08 crc kubenswrapper[4760]: E1124 17:19:08.649163 4760 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 17:19:08 crc kubenswrapper[4760]: E1124 17:19:08.649411 4760 projected.go:194] Error 
preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 17:19:08 crc kubenswrapper[4760]: E1124 17:19:08.649473 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift podName:8b420e33-0bf9-4d88-b33e-b5ba674ea4d9 nodeName:}" failed. No retries permitted until 2025-11-24 17:19:12.649452847 +0000 UTC m=+947.972334407 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift") pod "swift-storage-0" (UID: "8b420e33-0bf9-4d88-b33e-b5ba674ea4d9") : configmap "swift-ring-files" not found Nov 24 17:19:11 crc kubenswrapper[4760]: I1124 17:19:11.151646 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-74m25" event={"ID":"5659f320-0add-4e47-a4d9-b0aedac33d73","Type":"ContainerStarted","Data":"5d09efc14073b243618ad045a3bbc0eec6d7e4d317e8a2f3249c7f0fa60498d1"} Nov 24 17:19:11 crc kubenswrapper[4760]: I1124 17:19:11.153700 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0ac2-account-create-s59pm" event={"ID":"925c6a06-e39d-42ff-abc8-a318552833b1","Type":"ContainerStarted","Data":"91ec248ba846facb12ac6c07dc8ebe62f07b869d339fd4cba3b73f87e0c02630"} Nov 24 17:19:11 crc kubenswrapper[4760]: I1124 17:19:11.178996 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-74m25" podStartSLOduration=5.178973139 podStartE2EDuration="5.178973139s" podCreationTimestamp="2025-11-24 17:19:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:19:11.171637698 +0000 UTC m=+946.494519248" watchObservedRunningTime="2025-11-24 17:19:11.178973139 +0000 UTC m=+946.501854689" Nov 24 17:19:11 crc kubenswrapper[4760]: I1124 17:19:11.198136 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-0ac2-account-create-s59pm" podStartSLOduration=5.198111566 podStartE2EDuration="5.198111566s" podCreationTimestamp="2025-11-24 17:19:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:19:11.192058769 +0000 UTC m=+946.514940319" watchObservedRunningTime="2025-11-24 17:19:11.198111566 +0000 UTC m=+946.520993166" Nov 24 17:19:12 crc kubenswrapper[4760]: I1124 17:19:12.164928 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tqdfj" event={"ID":"c9090ecc-9df5-4a09-8360-9d11fa34833f","Type":"ContainerStarted","Data":"f8fa40b17e898905ec83c516573a21e4707a3508f1c55da17dccf91f0952b4d0"} Nov 24 17:19:12 crc kubenswrapper[4760]: I1124 17:19:12.165179 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:12 crc kubenswrapper[4760]: I1124 17:19:12.169358 4760 generic.go:334] "Generic (PLEG): container finished" podID="925c6a06-e39d-42ff-abc8-a318552833b1" containerID="91ec248ba846facb12ac6c07dc8ebe62f07b869d339fd4cba3b73f87e0c02630" exitCode=0 Nov 24 17:19:12 crc kubenswrapper[4760]: I1124 17:19:12.169449 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0ac2-account-create-s59pm" 
event={"ID":"925c6a06-e39d-42ff-abc8-a318552833b1","Type":"ContainerDied","Data":"91ec248ba846facb12ac6c07dc8ebe62f07b869d339fd4cba3b73f87e0c02630"} Nov 24 17:19:12 crc kubenswrapper[4760]: I1124 17:19:12.171943 4760 generic.go:334] "Generic (PLEG): container finished" podID="5659f320-0add-4e47-a4d9-b0aedac33d73" containerID="5d09efc14073b243618ad045a3bbc0eec6d7e4d317e8a2f3249c7f0fa60498d1" exitCode=0 Nov 24 17:19:12 crc kubenswrapper[4760]: I1124 17:19:12.172049 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-74m25" event={"ID":"5659f320-0add-4e47-a4d9-b0aedac33d73","Type":"ContainerDied","Data":"5d09efc14073b243618ad045a3bbc0eec6d7e4d317e8a2f3249c7f0fa60498d1"} Nov 24 17:19:12 crc kubenswrapper[4760]: I1124 17:19:12.190376 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-tqdfj" podStartSLOduration=9.190354723 podStartE2EDuration="9.190354723s" podCreationTimestamp="2025-11-24 17:19:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:19:12.18332363 +0000 UTC m=+947.506205200" watchObservedRunningTime="2025-11-24 17:19:12.190354723 +0000 UTC m=+947.513236283" Nov 24 17:19:12 crc kubenswrapper[4760]: I1124 17:19:12.581052 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 24 17:19:12 crc kubenswrapper[4760]: I1124 17:19:12.724339 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:12 crc kubenswrapper[4760]: E1124 17:19:12.724574 4760 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 17:19:12 crc kubenswrapper[4760]: E1124 17:19:12.724612 4760 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 17:19:12 crc kubenswrapper[4760]: E1124 17:19:12.724683 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift podName:8b420e33-0bf9-4d88-b33e-b5ba674ea4d9 nodeName:}" failed. No retries permitted until 2025-11-24 17:19:20.724659807 +0000 UTC m=+956.047541357 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift") pod "swift-storage-0" (UID: "8b420e33-0bf9-4d88-b33e-b5ba674ea4d9") : configmap "swift-ring-files" not found Nov 24 17:19:13 crc kubenswrapper[4760]: I1124 17:19:13.880431 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-74m25" Nov 24 17:19:13 crc kubenswrapper[4760]: I1124 17:19:13.885248 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-0ac2-account-create-s59pm" Nov 24 17:19:13 crc kubenswrapper[4760]: I1124 17:19:13.946829 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/925c6a06-e39d-42ff-abc8-a318552833b1-operator-scripts\") pod \"925c6a06-e39d-42ff-abc8-a318552833b1\" (UID: \"925c6a06-e39d-42ff-abc8-a318552833b1\") " Nov 24 17:19:13 crc kubenswrapper[4760]: I1124 17:19:13.946968 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gm9m5\" (UniqueName: \"kubernetes.io/projected/5659f320-0add-4e47-a4d9-b0aedac33d73-kube-api-access-gm9m5\") pod \"5659f320-0add-4e47-a4d9-b0aedac33d73\" (UID: \"5659f320-0add-4e47-a4d9-b0aedac33d73\") " Nov 24 17:19:13 crc kubenswrapper[4760]: I1124 17:19:13.947161 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5659f320-0add-4e47-a4d9-b0aedac33d73-operator-scripts\") pod \"5659f320-0add-4e47-a4d9-b0aedac33d73\" (UID: \"5659f320-0add-4e47-a4d9-b0aedac33d73\") " Nov 24 17:19:13 crc kubenswrapper[4760]: I1124 17:19:13.947213 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmwmw\" (UniqueName: \"kubernetes.io/projected/925c6a06-e39d-42ff-abc8-a318552833b1-kube-api-access-qmwmw\") pod \"925c6a06-e39d-42ff-abc8-a318552833b1\" (UID: \"925c6a06-e39d-42ff-abc8-a318552833b1\") " Nov 24 17:19:13 crc kubenswrapper[4760]: I1124 17:19:13.948678 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5659f320-0add-4e47-a4d9-b0aedac33d73-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5659f320-0add-4e47-a4d9-b0aedac33d73" (UID: "5659f320-0add-4e47-a4d9-b0aedac33d73"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:13 crc kubenswrapper[4760]: I1124 17:19:13.948901 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925c6a06-e39d-42ff-abc8-a318552833b1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "925c6a06-e39d-42ff-abc8-a318552833b1" (UID: "925c6a06-e39d-42ff-abc8-a318552833b1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:13 crc kubenswrapper[4760]: I1124 17:19:13.952740 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925c6a06-e39d-42ff-abc8-a318552833b1-kube-api-access-qmwmw" (OuterVolumeSpecName: "kube-api-access-qmwmw") pod "925c6a06-e39d-42ff-abc8-a318552833b1" (UID: "925c6a06-e39d-42ff-abc8-a318552833b1"). InnerVolumeSpecName "kube-api-access-qmwmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:13 crc kubenswrapper[4760]: I1124 17:19:13.955141 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5659f320-0add-4e47-a4d9-b0aedac33d73-kube-api-access-gm9m5" (OuterVolumeSpecName: "kube-api-access-gm9m5") pod "5659f320-0add-4e47-a4d9-b0aedac33d73" (UID: "5659f320-0add-4e47-a4d9-b0aedac33d73"). InnerVolumeSpecName "kube-api-access-gm9m5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.051048 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/925c6a06-e39d-42ff-abc8-a318552833b1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.051105 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gm9m5\" (UniqueName: \"kubernetes.io/projected/5659f320-0add-4e47-a4d9-b0aedac33d73-kube-api-access-gm9m5\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.051129 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5659f320-0add-4e47-a4d9-b0aedac33d73-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.051148 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmwmw\" (UniqueName: \"kubernetes.io/projected/925c6a06-e39d-42ff-abc8-a318552833b1-kube-api-access-qmwmw\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.204704 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-d9mv7" event={"ID":"d0bcc362-6648-4630-b41b-610209865eea","Type":"ContainerStarted","Data":"4a8d47f050a18f5f4fe253dd907e2720f20419ddd07c6ff3310a4e75c0495781"} Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.211365 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0ac2-account-create-s59pm" event={"ID":"925c6a06-e39d-42ff-abc8-a318552833b1","Type":"ContainerDied","Data":"f9b902f2a787cd7da6d0aa72a03cd1578339d18df53a52fe9bfc5aa89042e476"} Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.211435 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9b902f2a787cd7da6d0aa72a03cd1578339d18df53a52fe9bfc5aa89042e476" Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.211380 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0ac2-account-create-s59pm" Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.213603 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-74m25" event={"ID":"5659f320-0add-4e47-a4d9-b0aedac33d73","Type":"ContainerDied","Data":"a93f0005dbb0fd3be9567d4538fc3b6c1e02a1bcdfe2ed8ab1cf315e8f50fd21"} Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.213642 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a93f0005dbb0fd3be9567d4538fc3b6c1e02a1bcdfe2ed8ab1cf315e8f50fd21" Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.213713 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-74m25" Nov 24 17:19:14 crc kubenswrapper[4760]: I1124 17:19:14.234772 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-d9mv7" podStartSLOduration=2.074401462 podStartE2EDuration="9.234755095s" podCreationTimestamp="2025-11-24 17:19:05 +0000 UTC" firstStartedPulling="2025-11-24 17:19:06.59831197 +0000 UTC m=+941.921193520" lastFinishedPulling="2025-11-24 17:19:13.758665593 +0000 UTC m=+949.081547153" observedRunningTime="2025-11-24 17:19:14.228571025 +0000 UTC m=+949.551452605" watchObservedRunningTime="2025-11-24 17:19:14.234755095 +0000 UTC m=+949.557636645" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.273138 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-kpvf5"] Nov 24 17:19:17 crc kubenswrapper[4760]: E1124 17:19:17.273726 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5659f320-0add-4e47-a4d9-b0aedac33d73" containerName="mariadb-database-create" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.273739 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="5659f320-0add-4e47-a4d9-b0aedac33d73" containerName="mariadb-database-create" Nov 24 17:19:17 crc kubenswrapper[4760]: E1124 17:19:17.273750 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="925c6a06-e39d-42ff-abc8-a318552833b1" containerName="mariadb-account-create" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.273755 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="925c6a06-e39d-42ff-abc8-a318552833b1" containerName="mariadb-account-create" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.273905 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="925c6a06-e39d-42ff-abc8-a318552833b1" containerName="mariadb-account-create" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.273916 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="5659f320-0add-4e47-a4d9-b0aedac33d73" containerName="mariadb-database-create" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.274420 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.277679 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2bglw" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.278779 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.282271 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-kpvf5"] Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.331722 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdzst\" (UniqueName: \"kubernetes.io/projected/e036f00c-517f-4156-9e2f-52ce275d44f6-kube-api-access-kdzst\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.331786 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-config-data\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.332415 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-db-sync-config-data\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.332509 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-combined-ca-bundle\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.434621 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-db-sync-config-data\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.434707 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-combined-ca-bundle\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.434834 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdzst\" (UniqueName: \"kubernetes.io/projected/e036f00c-517f-4156-9e2f-52ce275d44f6-kube-api-access-kdzst\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.434883 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-config-data\") pod 
\"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.443546 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-db-sync-config-data\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.443576 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-combined-ca-bundle\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.458925 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-config-data\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.460738 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdzst\" (UniqueName: \"kubernetes.io/projected/e036f00c-517f-4156-9e2f-52ce275d44f6-kube-api-access-kdzst\") pod \"glance-db-sync-kpvf5\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:17 crc kubenswrapper[4760]: I1124 17:19:17.601985 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:18 crc kubenswrapper[4760]: I1124 17:19:18.197914 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-kpvf5"] Nov 24 17:19:18 crc kubenswrapper[4760]: I1124 17:19:18.249969 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-kpvf5" event={"ID":"e036f00c-517f-4156-9e2f-52ce275d44f6","Type":"ContainerStarted","Data":"12ec7f21cf73590c9e22d9fab340c12e4e91586ce85d7715c43bf6d41bc34016"} Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.111393 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.167032 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-x75ct"] Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.167593 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" podUID="b5fccb9a-e3e5-44ed-8dea-34814910b15c" containerName="dnsmasq-dns" containerID="cri-o://fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e" gracePeriod=10 Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.685497 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.818879 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-config\") pod \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.819267 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-dns-svc\") pod \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.819376 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhcc4\" (UniqueName: \"kubernetes.io/projected/b5fccb9a-e3e5-44ed-8dea-34814910b15c-kube-api-access-hhcc4\") pod \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\" (UID: \"b5fccb9a-e3e5-44ed-8dea-34814910b15c\") " Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.825666 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5fccb9a-e3e5-44ed-8dea-34814910b15c-kube-api-access-hhcc4" (OuterVolumeSpecName: "kube-api-access-hhcc4") pod "b5fccb9a-e3e5-44ed-8dea-34814910b15c" (UID: "b5fccb9a-e3e5-44ed-8dea-34814910b15c"). InnerVolumeSpecName "kube-api-access-hhcc4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.859893 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-config" (OuterVolumeSpecName: "config") pod "b5fccb9a-e3e5-44ed-8dea-34814910b15c" (UID: "b5fccb9a-e3e5-44ed-8dea-34814910b15c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.875489 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b5fccb9a-e3e5-44ed-8dea-34814910b15c" (UID: "b5fccb9a-e3e5-44ed-8dea-34814910b15c"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.921596 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.921630 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5fccb9a-e3e5-44ed-8dea-34814910b15c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:19 crc kubenswrapper[4760]: I1124 17:19:19.921640 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhcc4\" (UniqueName: \"kubernetes.io/projected/b5fccb9a-e3e5-44ed-8dea-34814910b15c-kube-api-access-hhcc4\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.271986 4760 generic.go:334] "Generic (PLEG): container finished" podID="b5fccb9a-e3e5-44ed-8dea-34814910b15c" containerID="fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e" exitCode=0 Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.272053 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" event={"ID":"b5fccb9a-e3e5-44ed-8dea-34814910b15c","Type":"ContainerDied","Data":"fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e"} Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.272079 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" event={"ID":"b5fccb9a-e3e5-44ed-8dea-34814910b15c","Type":"ContainerDied","Data":"7bd5a61e08b0565a139799c1394fd049f0419e95cb1a221028ac7c720bdd75b6"} Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.272094 4760 scope.go:117] "RemoveContainer" containerID="fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e" Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.272207 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-x75ct" Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.322032 4760 scope.go:117] "RemoveContainer" containerID="682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e" Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.330867 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-x75ct"] Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.337639 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-x75ct"] Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.347171 4760 scope.go:117] "RemoveContainer" containerID="fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e" Nov 24 17:19:20 crc kubenswrapper[4760]: E1124 17:19:20.347548 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e\": container with ID starting with fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e not found: ID does not exist" containerID="fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e" Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.347623 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e"} err="failed to get container status \"fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e\": rpc error: code = NotFound desc = could not find container \"fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e\": container with ID starting with fe76f287d16836478a3305de8635d9322c95196177db837cf08aa8fe3b771f8e not found: ID does not exist" Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.347651 4760 scope.go:117] "RemoveContainer" containerID="682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e" Nov 24 17:19:20 crc kubenswrapper[4760]: E1124 17:19:20.348093 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e\": container with ID starting with 682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e not found: ID does not exist" containerID="682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e" Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.348121 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e"} err="failed to get container status \"682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e\": rpc error: code = NotFound desc = could not find container \"682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e\": container with ID starting with 682ff10bca0a3178fb2fa357c6eb8a9f7d6adb6afc2f208045fa56ded33d003e not found: ID does not exist" Nov 24 17:19:20 crc kubenswrapper[4760]: I1124 17:19:20.733671 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:20 crc kubenswrapper[4760]: E1124 17:19:20.734256 4760 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap 
"swift-ring-files" not found Nov 24 17:19:20 crc kubenswrapper[4760]: E1124 17:19:20.734273 4760 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 17:19:20 crc kubenswrapper[4760]: E1124 17:19:20.734317 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift podName:8b420e33-0bf9-4d88-b33e-b5ba674ea4d9 nodeName:}" failed. No retries permitted until 2025-11-24 17:19:36.734302898 +0000 UTC m=+972.057184448 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift") pod "swift-storage-0" (UID: "8b420e33-0bf9-4d88-b33e-b5ba674ea4d9") : configmap "swift-ring-files" not found Nov 24 17:19:21 crc kubenswrapper[4760]: I1124 17:19:21.289197 4760 generic.go:334] "Generic (PLEG): container finished" podID="d0bcc362-6648-4630-b41b-610209865eea" containerID="4a8d47f050a18f5f4fe253dd907e2720f20419ddd07c6ff3310a4e75c0495781" exitCode=0 Nov 24 17:19:21 crc kubenswrapper[4760]: I1124 17:19:21.289241 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-d9mv7" event={"ID":"d0bcc362-6648-4630-b41b-610209865eea","Type":"ContainerDied","Data":"4a8d47f050a18f5f4fe253dd907e2720f20419ddd07c6ff3310a4e75c0495781"} Nov 24 17:19:21 crc kubenswrapper[4760]: I1124 17:19:21.477921 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5fccb9a-e3e5-44ed-8dea-34814910b15c" path="/var/lib/kubelet/pods/b5fccb9a-e3e5-44ed-8dea-34814910b15c/volumes" Nov 24 17:19:21 crc kubenswrapper[4760]: I1124 17:19:21.976210 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-dl9cm" podUID="39e10c47-4e85-46de-a754-3ee0245718d7" containerName="ovn-controller" probeResult="failure" output=< Nov 24 17:19:21 crc kubenswrapper[4760]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 24 17:19:21 crc kubenswrapper[4760]: > Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.059833 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.060756 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-bnfkl" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.289684 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-dl9cm-config-ld56d"] Nov 24 17:19:22 crc kubenswrapper[4760]: E1124 17:19:22.290172 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5fccb9a-e3e5-44ed-8dea-34814910b15c" containerName="init" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.290189 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5fccb9a-e3e5-44ed-8dea-34814910b15c" containerName="init" Nov 24 17:19:22 crc kubenswrapper[4760]: E1124 17:19:22.290207 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5fccb9a-e3e5-44ed-8dea-34814910b15c" containerName="dnsmasq-dns" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.290215 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5fccb9a-e3e5-44ed-8dea-34814910b15c" containerName="dnsmasq-dns" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.290412 4760 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="b5fccb9a-e3e5-44ed-8dea-34814910b15c" containerName="dnsmasq-dns" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.291103 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.294294 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.303234 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dl9cm-config-ld56d"] Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.373607 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-scripts\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.374058 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58rvq\" (UniqueName: \"kubernetes.io/projected/15c3cc68-2372-40ad-a15e-f2513ab5da20-kube-api-access-58rvq\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.374083 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run-ovn\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.374105 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-additional-scripts\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.374125 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.374165 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-log-ovn\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.474912 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-scripts\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.474995 4760 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58rvq\" (UniqueName: \"kubernetes.io/projected/15c3cc68-2372-40ad-a15e-f2513ab5da20-kube-api-access-58rvq\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.475041 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run-ovn\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.475074 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-additional-scripts\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.475093 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.475150 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-log-ovn\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.475411 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-log-ovn\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.475964 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run-ovn\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.476471 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.476960 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-additional-scripts\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.477905 4760 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-scripts\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.499217 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58rvq\" (UniqueName: \"kubernetes.io/projected/15c3cc68-2372-40ad-a15e-f2513ab5da20-kube-api-access-58rvq\") pod \"ovn-controller-dl9cm-config-ld56d\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.615866 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.778299 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-d9mv7" Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.881690 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-ring-data-devices\") pod \"d0bcc362-6648-4630-b41b-610209865eea\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.881796 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-combined-ca-bundle\") pod \"d0bcc362-6648-4630-b41b-610209865eea\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.881826 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-scripts\") pod \"d0bcc362-6648-4630-b41b-610209865eea\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.881871 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-dispersionconf\") pod \"d0bcc362-6648-4630-b41b-610209865eea\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.881894 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d0bcc362-6648-4630-b41b-610209865eea-etc-swift\") pod \"d0bcc362-6648-4630-b41b-610209865eea\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.881916 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-swiftconf\") pod \"d0bcc362-6648-4630-b41b-610209865eea\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.881964 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shbm6\" (UniqueName: \"kubernetes.io/projected/d0bcc362-6648-4630-b41b-610209865eea-kube-api-access-shbm6\") pod \"d0bcc362-6648-4630-b41b-610209865eea\" (UID: \"d0bcc362-6648-4630-b41b-610209865eea\") " 
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.886406 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0bcc362-6648-4630-b41b-610209865eea-kube-api-access-shbm6" (OuterVolumeSpecName: "kube-api-access-shbm6") pod "d0bcc362-6648-4630-b41b-610209865eea" (UID: "d0bcc362-6648-4630-b41b-610209865eea"). InnerVolumeSpecName "kube-api-access-shbm6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.886567 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "d0bcc362-6648-4630-b41b-610209865eea" (UID: "d0bcc362-6648-4630-b41b-610209865eea"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.886635 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0bcc362-6648-4630-b41b-610209865eea-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "d0bcc362-6648-4630-b41b-610209865eea" (UID: "d0bcc362-6648-4630-b41b-610209865eea"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.898652 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "d0bcc362-6648-4630-b41b-610209865eea" (UID: "d0bcc362-6648-4630-b41b-610209865eea"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.908853 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-scripts" (OuterVolumeSpecName: "scripts") pod "d0bcc362-6648-4630-b41b-610209865eea" (UID: "d0bcc362-6648-4630-b41b-610209865eea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.911445 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "d0bcc362-6648-4630-b41b-610209865eea" (UID: "d0bcc362-6648-4630-b41b-610209865eea"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.911515 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0bcc362-6648-4630-b41b-610209865eea" (UID: "d0bcc362-6648-4630-b41b-610209865eea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.987168 4760 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-ring-data-devices\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.987205 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.987216 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d0bcc362-6648-4630-b41b-610209865eea-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.987224 4760 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-dispersionconf\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.987232 4760 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d0bcc362-6648-4630-b41b-610209865eea-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.987240 4760 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d0bcc362-6648-4630-b41b-610209865eea-swiftconf\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:22 crc kubenswrapper[4760]: I1124 17:19:22.987250 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shbm6\" (UniqueName: \"kubernetes.io/projected/d0bcc362-6648-4630-b41b-610209865eea-kube-api-access-shbm6\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:23 crc kubenswrapper[4760]: I1124 17:19:23.153148 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dl9cm-config-ld56d"]
Nov 24 17:19:23 crc kubenswrapper[4760]: I1124 17:19:23.315151 4760 generic.go:334] "Generic (PLEG): container finished" podID="9d3132aa-0715-4d60-840c-fca7d6fef37c" containerID="ae3c541e7631907d00510787756cfe3edbe148432975656cb203a8e3db203fd5" exitCode=0
Nov 24 17:19:23 crc kubenswrapper[4760]: I1124 17:19:23.315249 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9d3132aa-0715-4d60-840c-fca7d6fef37c","Type":"ContainerDied","Data":"ae3c541e7631907d00510787756cfe3edbe148432975656cb203a8e3db203fd5"}
Nov 24 17:19:23 crc kubenswrapper[4760]: I1124 17:19:23.317720 4760 generic.go:334] "Generic (PLEG): container finished" podID="3a459f6d-ed01-4235-9062-4deb6ac9ccec" containerID="694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4" exitCode=0
Nov 24 17:19:23 crc kubenswrapper[4760]: I1124 17:19:23.317828 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a459f6d-ed01-4235-9062-4deb6ac9ccec","Type":"ContainerDied","Data":"694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4"}
Nov 24 17:19:23 crc kubenswrapper[4760]: I1124 17:19:23.320246 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-d9mv7" event={"ID":"d0bcc362-6648-4630-b41b-610209865eea","Type":"ContainerDied","Data":"62ae92764f2d1142d90812b8df10459972459821b32bdc260ca55ac96ff1d74b"}
Nov 24 17:19:23 crc kubenswrapper[4760]: I1124 17:19:23.320275 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="62ae92764f2d1142d90812b8df10459972459821b32bdc260ca55ac96ff1d74b"
Nov 24 17:19:23 crc kubenswrapper[4760]: I1124 17:19:23.320282 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-d9mv7"
Nov 24 17:19:26 crc kubenswrapper[4760]: I1124 17:19:26.974853 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-dl9cm" podUID="39e10c47-4e85-46de-a754-3ee0245718d7" containerName="ovn-controller" probeResult="failure" output=<
Nov 24 17:19:26 crc kubenswrapper[4760]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 24 17:19:26 crc kubenswrapper[4760]: >
Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.383988 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9d3132aa-0715-4d60-840c-fca7d6fef37c","Type":"ContainerStarted","Data":"53b70a1d427266be2a542010ea82a8dcd92baf006ad471459697a2de8d524fb6"}
Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.384833 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.385400 4760 generic.go:334] "Generic (PLEG): container finished" podID="15c3cc68-2372-40ad-a15e-f2513ab5da20" containerID="f8af48a875d829ae5d11e97bfe587d2c22287076bbb52fce432ceba139826cd5" exitCode=0
Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.385452 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dl9cm-config-ld56d" event={"ID":"15c3cc68-2372-40ad-a15e-f2513ab5da20","Type":"ContainerDied","Data":"f8af48a875d829ae5d11e97bfe587d2c22287076bbb52fce432ceba139826cd5"}
Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.385470 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dl9cm-config-ld56d" event={"ID":"15c3cc68-2372-40ad-a15e-f2513ab5da20","Type":"ContainerStarted","Data":"e8f466b6e534c185adfeed3c061048a550419711d2cca1dc4e2c9bd316364dd3"}
Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.386966 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a459f6d-ed01-4235-9062-4deb6ac9ccec","Type":"ContainerStarted","Data":"4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43"}
Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.387191 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.388367 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-kpvf5" event={"ID":"e036f00c-517f-4156-9e2f-52ce275d44f6","Type":"ContainerStarted","Data":"5be91801ae910f735dab71d5f928752d3833df220a53308ed1455434c9e44794"}
Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.412383 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=54.923893953 podStartE2EDuration="1m3.412356351s" podCreationTimestamp="2025-11-24 17:18:27 +0000 UTC" firstStartedPulling="2025-11-24 17:18:40.504365071 +0000 UTC m=+915.827246621" lastFinishedPulling="2025-11-24 17:18:48.992827469 +0000 UTC m=+924.315709019" observedRunningTime="2025-11-24 17:19:30.406450159 +0000 UTC m=+965.729331729" watchObservedRunningTime="2025-11-24 17:19:30.412356351 +0000
UTC m=+965.735237941" Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.429083 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-kpvf5" podStartSLOduration=2.056681391 podStartE2EDuration="13.42906346s" podCreationTimestamp="2025-11-24 17:19:17 +0000 UTC" firstStartedPulling="2025-11-24 17:19:18.202982474 +0000 UTC m=+953.525864034" lastFinishedPulling="2025-11-24 17:19:29.575364513 +0000 UTC m=+964.898246103" observedRunningTime="2025-11-24 17:19:30.424415663 +0000 UTC m=+965.747297213" watchObservedRunningTime="2025-11-24 17:19:30.42906346 +0000 UTC m=+965.751945010" Nov 24 17:19:30 crc kubenswrapper[4760]: I1124 17:19:30.469311 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=56.699984386 podStartE2EDuration="1m4.469287047s" podCreationTimestamp="2025-11-24 17:18:26 +0000 UTC" firstStartedPulling="2025-11-24 17:18:40.786301385 +0000 UTC m=+916.109182935" lastFinishedPulling="2025-11-24 17:18:48.555604016 +0000 UTC m=+923.878485596" observedRunningTime="2025-11-24 17:19:30.46214291 +0000 UTC m=+965.785024460" watchObservedRunningTime="2025-11-24 17:19:30.469287047 +0000 UTC m=+965.792168597" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.703148 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.837997 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58rvq\" (UniqueName: \"kubernetes.io/projected/15c3cc68-2372-40ad-a15e-f2513ab5da20-kube-api-access-58rvq\") pod \"15c3cc68-2372-40ad-a15e-f2513ab5da20\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838077 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-additional-scripts\") pod \"15c3cc68-2372-40ad-a15e-f2513ab5da20\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838110 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run-ovn\") pod \"15c3cc68-2372-40ad-a15e-f2513ab5da20\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838135 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-scripts\") pod \"15c3cc68-2372-40ad-a15e-f2513ab5da20\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838195 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run\") pod \"15c3cc68-2372-40ad-a15e-f2513ab5da20\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838255 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-log-ovn\") pod \"15c3cc68-2372-40ad-a15e-f2513ab5da20\" (UID: \"15c3cc68-2372-40ad-a15e-f2513ab5da20\") " Nov 
24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838262 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "15c3cc68-2372-40ad-a15e-f2513ab5da20" (UID: "15c3cc68-2372-40ad-a15e-f2513ab5da20"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838353 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "15c3cc68-2372-40ad-a15e-f2513ab5da20" (UID: "15c3cc68-2372-40ad-a15e-f2513ab5da20"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838349 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run" (OuterVolumeSpecName: "var-run") pod "15c3cc68-2372-40ad-a15e-f2513ab5da20" (UID: "15c3cc68-2372-40ad-a15e-f2513ab5da20"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838584 4760 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838597 4760 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-run\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.838605 4760 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/15c3cc68-2372-40ad-a15e-f2513ab5da20-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.839328 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-scripts" (OuterVolumeSpecName: "scripts") pod "15c3cc68-2372-40ad-a15e-f2513ab5da20" (UID: "15c3cc68-2372-40ad-a15e-f2513ab5da20"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.839413 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "15c3cc68-2372-40ad-a15e-f2513ab5da20" (UID: "15c3cc68-2372-40ad-a15e-f2513ab5da20"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.846347 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15c3cc68-2372-40ad-a15e-f2513ab5da20-kube-api-access-58rvq" (OuterVolumeSpecName: "kube-api-access-58rvq") pod "15c3cc68-2372-40ad-a15e-f2513ab5da20" (UID: "15c3cc68-2372-40ad-a15e-f2513ab5da20"). InnerVolumeSpecName "kube-api-access-58rvq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.939996 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58rvq\" (UniqueName: \"kubernetes.io/projected/15c3cc68-2372-40ad-a15e-f2513ab5da20-kube-api-access-58rvq\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.940053 4760 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.940065 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/15c3cc68-2372-40ad-a15e-f2513ab5da20-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:31 crc kubenswrapper[4760]: I1124 17:19:31.977636 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-dl9cm" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.404971 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dl9cm-config-ld56d" event={"ID":"15c3cc68-2372-40ad-a15e-f2513ab5da20","Type":"ContainerDied","Data":"e8f466b6e534c185adfeed3c061048a550419711d2cca1dc4e2c9bd316364dd3"} Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.405058 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dl9cm-config-ld56d" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.405064 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8f466b6e534c185adfeed3c061048a550419711d2cca1dc4e2c9bd316364dd3" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.829697 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-dl9cm-config-ld56d"] Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.836695 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-dl9cm-config-ld56d"] Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.886976 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-dl9cm-config-8jnr2"] Nov 24 17:19:32 crc kubenswrapper[4760]: E1124 17:19:32.887304 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0bcc362-6648-4630-b41b-610209865eea" containerName="swift-ring-rebalance" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.887319 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0bcc362-6648-4630-b41b-610209865eea" containerName="swift-ring-rebalance" Nov 24 17:19:32 crc kubenswrapper[4760]: E1124 17:19:32.887353 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15c3cc68-2372-40ad-a15e-f2513ab5da20" containerName="ovn-config" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.887359 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="15c3cc68-2372-40ad-a15e-f2513ab5da20" containerName="ovn-config" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.887506 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0bcc362-6648-4630-b41b-610209865eea" containerName="swift-ring-rebalance" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.887524 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="15c3cc68-2372-40ad-a15e-f2513ab5da20" containerName="ovn-config" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.888021 4760 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.890086 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.905277 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dl9cm-config-8jnr2"] Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.955428 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.955470 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlts9\" (UniqueName: \"kubernetes.io/projected/daf2a727-84ee-44a8-9150-1b81ef48212e-kube-api-access-xlts9\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.955493 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-scripts\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.955541 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run-ovn\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.955568 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-log-ovn\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:32 crc kubenswrapper[4760]: I1124 17:19:32.955605 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-additional-scripts\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.057133 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.057191 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlts9\" (UniqueName: 
\"kubernetes.io/projected/daf2a727-84ee-44a8-9150-1b81ef48212e-kube-api-access-xlts9\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.057214 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-scripts\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.057267 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run-ovn\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.057292 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-log-ovn\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.057330 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-additional-scripts\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.058155 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.058461 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-additional-scripts\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.058527 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-log-ovn\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.058533 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run-ovn\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.060750 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-scripts\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.082663 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlts9\" (UniqueName: \"kubernetes.io/projected/daf2a727-84ee-44a8-9150-1b81ef48212e-kube-api-access-xlts9\") pod \"ovn-controller-dl9cm-config-8jnr2\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.206276 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.476566 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15c3cc68-2372-40ad-a15e-f2513ab5da20" path="/var/lib/kubelet/pods/15c3cc68-2372-40ad-a15e-f2513ab5da20/volumes" Nov 24 17:19:33 crc kubenswrapper[4760]: I1124 17:19:33.679850 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dl9cm-config-8jnr2"] Nov 24 17:19:34 crc kubenswrapper[4760]: I1124 17:19:34.423614 4760 generic.go:334] "Generic (PLEG): container finished" podID="daf2a727-84ee-44a8-9150-1b81ef48212e" containerID="d2fffbf61695d3305b4722e52a2e92317c60f9d892630df88bd086c8ac5be608" exitCode=0 Nov 24 17:19:34 crc kubenswrapper[4760]: I1124 17:19:34.423687 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dl9cm-config-8jnr2" event={"ID":"daf2a727-84ee-44a8-9150-1b81ef48212e","Type":"ContainerDied","Data":"d2fffbf61695d3305b4722e52a2e92317c60f9d892630df88bd086c8ac5be608"} Nov 24 17:19:34 crc kubenswrapper[4760]: I1124 17:19:34.423921 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dl9cm-config-8jnr2" event={"ID":"daf2a727-84ee-44a8-9150-1b81ef48212e","Type":"ContainerStarted","Data":"c9fd1d606d66cec9eafa3d19a95c40586f26b1f2b6fdde272ec1805a3aa3de69"} Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.642692 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.643139 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.738429 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.801597 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlts9\" (UniqueName: \"kubernetes.io/projected/daf2a727-84ee-44a8-9150-1b81ef48212e-kube-api-access-xlts9\") pod \"daf2a727-84ee-44a8-9150-1b81ef48212e\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.801679 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-scripts\") pod \"daf2a727-84ee-44a8-9150-1b81ef48212e\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.801708 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run-ovn\") pod \"daf2a727-84ee-44a8-9150-1b81ef48212e\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.801764 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-additional-scripts\") pod \"daf2a727-84ee-44a8-9150-1b81ef48212e\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.801792 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run\") pod \"daf2a727-84ee-44a8-9150-1b81ef48212e\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.801830 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "daf2a727-84ee-44a8-9150-1b81ef48212e" (UID: "daf2a727-84ee-44a8-9150-1b81ef48212e"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.801865 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-log-ovn\") pod \"daf2a727-84ee-44a8-9150-1b81ef48212e\" (UID: \"daf2a727-84ee-44a8-9150-1b81ef48212e\") " Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.801895 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run" (OuterVolumeSpecName: "var-run") pod "daf2a727-84ee-44a8-9150-1b81ef48212e" (UID: "daf2a727-84ee-44a8-9150-1b81ef48212e"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.802014 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "daf2a727-84ee-44a8-9150-1b81ef48212e" (UID: "daf2a727-84ee-44a8-9150-1b81ef48212e"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.802219 4760 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.802243 4760 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-run\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.802262 4760 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/daf2a727-84ee-44a8-9150-1b81ef48212e-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.802579 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "daf2a727-84ee-44a8-9150-1b81ef48212e" (UID: "daf2a727-84ee-44a8-9150-1b81ef48212e"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.803711 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-scripts" (OuterVolumeSpecName: "scripts") pod "daf2a727-84ee-44a8-9150-1b81ef48212e" (UID: "daf2a727-84ee-44a8-9150-1b81ef48212e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.813936 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daf2a727-84ee-44a8-9150-1b81ef48212e-kube-api-access-xlts9" (OuterVolumeSpecName: "kube-api-access-xlts9") pod "daf2a727-84ee-44a8-9150-1b81ef48212e" (UID: "daf2a727-84ee-44a8-9150-1b81ef48212e"). InnerVolumeSpecName "kube-api-access-xlts9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.903436 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlts9\" (UniqueName: \"kubernetes.io/projected/daf2a727-84ee-44a8-9150-1b81ef48212e-kube-api-access-xlts9\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.903463 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:35 crc kubenswrapper[4760]: I1124 17:19:35.903474 4760 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/daf2a727-84ee-44a8-9150-1b81ef48212e-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:36 crc kubenswrapper[4760]: I1124 17:19:36.448904 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dl9cm-config-8jnr2" event={"ID":"daf2a727-84ee-44a8-9150-1b81ef48212e","Type":"ContainerDied","Data":"c9fd1d606d66cec9eafa3d19a95c40586f26b1f2b6fdde272ec1805a3aa3de69"} Nov 24 17:19:36 crc kubenswrapper[4760]: I1124 17:19:36.448947 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9fd1d606d66cec9eafa3d19a95c40586f26b1f2b6fdde272ec1805a3aa3de69" Nov 24 17:19:36 crc kubenswrapper[4760]: I1124 17:19:36.448990 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dl9cm-config-8jnr2" Nov 24 17:19:36 crc kubenswrapper[4760]: I1124 17:19:36.816790 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-dl9cm-config-8jnr2"] Nov 24 17:19:36 crc kubenswrapper[4760]: I1124 17:19:36.821247 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:36 crc kubenswrapper[4760]: I1124 17:19:36.823916 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-dl9cm-config-8jnr2"] Nov 24 17:19:36 crc kubenswrapper[4760]: I1124 17:19:36.840423 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8b420e33-0bf9-4d88-b33e-b5ba674ea4d9-etc-swift\") pod \"swift-storage-0\" (UID: \"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9\") " pod="openstack/swift-storage-0" Nov 24 17:19:36 crc kubenswrapper[4760]: I1124 17:19:36.926300 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 24 17:19:37 crc kubenswrapper[4760]: I1124 17:19:37.460631 4760 generic.go:334] "Generic (PLEG): container finished" podID="e036f00c-517f-4156-9e2f-52ce275d44f6" containerID="5be91801ae910f735dab71d5f928752d3833df220a53308ed1455434c9e44794" exitCode=0 Nov 24 17:19:37 crc kubenswrapper[4760]: I1124 17:19:37.460725 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-kpvf5" event={"ID":"e036f00c-517f-4156-9e2f-52ce275d44f6","Type":"ContainerDied","Data":"5be91801ae910f735dab71d5f928752d3833df220a53308ed1455434c9e44794"} Nov 24 17:19:37 crc kubenswrapper[4760]: I1124 17:19:37.475069 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="daf2a727-84ee-44a8-9150-1b81ef48212e" path="/var/lib/kubelet/pods/daf2a727-84ee-44a8-9150-1b81ef48212e/volumes" Nov 24 17:19:37 crc kubenswrapper[4760]: I1124 17:19:37.524296 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 24 17:19:38 crc kubenswrapper[4760]: I1124 17:19:38.468688 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"6d845600713d751f9cbf58aa7714f54241c4b9744bd90272db978b9f6f3010a4"} Nov 24 17:19:38 crc kubenswrapper[4760]: I1124 17:19:38.869443 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:38 crc kubenswrapper[4760]: I1124 17:19:38.954823 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-config-data\") pod \"e036f00c-517f-4156-9e2f-52ce275d44f6\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " Nov 24 17:19:38 crc kubenswrapper[4760]: I1124 17:19:38.954935 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdzst\" (UniqueName: \"kubernetes.io/projected/e036f00c-517f-4156-9e2f-52ce275d44f6-kube-api-access-kdzst\") pod \"e036f00c-517f-4156-9e2f-52ce275d44f6\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " Nov 24 17:19:38 crc kubenswrapper[4760]: I1124 17:19:38.955205 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-db-sync-config-data\") pod \"e036f00c-517f-4156-9e2f-52ce275d44f6\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " Nov 24 17:19:38 crc kubenswrapper[4760]: I1124 17:19:38.955256 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-combined-ca-bundle\") pod \"e036f00c-517f-4156-9e2f-52ce275d44f6\" (UID: \"e036f00c-517f-4156-9e2f-52ce275d44f6\") " Nov 24 17:19:38 crc kubenswrapper[4760]: I1124 17:19:38.961165 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e036f00c-517f-4156-9e2f-52ce275d44f6" (UID: "e036f00c-517f-4156-9e2f-52ce275d44f6"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:19:38 crc kubenswrapper[4760]: I1124 17:19:38.963925 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e036f00c-517f-4156-9e2f-52ce275d44f6-kube-api-access-kdzst" (OuterVolumeSpecName: "kube-api-access-kdzst") pod "e036f00c-517f-4156-9e2f-52ce275d44f6" (UID: "e036f00c-517f-4156-9e2f-52ce275d44f6"). InnerVolumeSpecName "kube-api-access-kdzst". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:38 crc kubenswrapper[4760]: I1124 17:19:38.990270 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e036f00c-517f-4156-9e2f-52ce275d44f6" (UID: "e036f00c-517f-4156-9e2f-52ce275d44f6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.031971 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-config-data" (OuterVolumeSpecName: "config-data") pod "e036f00c-517f-4156-9e2f-52ce275d44f6" (UID: "e036f00c-517f-4156-9e2f-52ce275d44f6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.057788 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.057828 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdzst\" (UniqueName: \"kubernetes.io/projected/e036f00c-517f-4156-9e2f-52ce275d44f6-kube-api-access-kdzst\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.057843 4760 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.057855 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e036f00c-517f-4156-9e2f-52ce275d44f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.480985 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-kpvf5" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.484420 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-kpvf5" event={"ID":"e036f00c-517f-4156-9e2f-52ce275d44f6","Type":"ContainerDied","Data":"12ec7f21cf73590c9e22d9fab340c12e4e91586ce85d7715c43bf6d41bc34016"} Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.506132 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12ec7f21cf73590c9e22d9fab340c12e4e91586ce85d7715c43bf6d41bc34016" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.506168 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"8c9cccecfaa3a8794c4e07e34bdb78534fef341994e3486c0b61868d1d50ebdf"} Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.506189 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"a559a73a5cc948c58a879a78644bdd002d592f7f68e31e6d55bb869903a057be"} Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.506206 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"b2641b098877ee99ce46cd1b635cc92c55723e87d0b41ce4eb33d2a072e40094"} Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.800661 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-kh2bh"] Nov 24 17:19:39 crc kubenswrapper[4760]: E1124 17:19:39.801263 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e036f00c-517f-4156-9e2f-52ce275d44f6" containerName="glance-db-sync" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.801276 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="e036f00c-517f-4156-9e2f-52ce275d44f6" containerName="glance-db-sync" Nov 24 17:19:39 crc kubenswrapper[4760]: E1124 17:19:39.801291 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daf2a727-84ee-44a8-9150-1b81ef48212e" containerName="ovn-config" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.801297 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="daf2a727-84ee-44a8-9150-1b81ef48212e" containerName="ovn-config" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.801455 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="daf2a727-84ee-44a8-9150-1b81ef48212e" containerName="ovn-config" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.801468 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="e036f00c-517f-4156-9e2f-52ce275d44f6" containerName="glance-db-sync" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.802224 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.826394 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-kh2bh"] Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.872061 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.872140 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52n2x\" (UniqueName: \"kubernetes.io/projected/619e83e9-c912-4f56-832c-df1c6ab8f428-kube-api-access-52n2x\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.872161 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-config\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.872303 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.872381 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.973538 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52n2x\" (UniqueName: \"kubernetes.io/projected/619e83e9-c912-4f56-832c-df1c6ab8f428-kube-api-access-52n2x\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.973579 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-config\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.973622 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.973642 4760 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.973700 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.974634 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.974659 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-config\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.974676 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.974687 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:39 crc kubenswrapper[4760]: I1124 17:19:39.988925 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52n2x\" (UniqueName: \"kubernetes.io/projected/619e83e9-c912-4f56-832c-df1c6ab8f428-kube-api-access-52n2x\") pod \"dnsmasq-dns-5b946c75cc-kh2bh\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:40 crc kubenswrapper[4760]: I1124 17:19:40.115805 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:42 crc kubenswrapper[4760]: I1124 17:19:42.528819 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"aa892d8d2dda1d517ffd0811d4ec75a89e6223be7b2dcc9c69d422155f58b73a"} Nov 24 17:19:42 crc kubenswrapper[4760]: I1124 17:19:42.709789 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-kh2bh"] Nov 24 17:19:43 crc kubenswrapper[4760]: I1124 17:19:43.538989 4760 generic.go:334] "Generic (PLEG): container finished" podID="619e83e9-c912-4f56-832c-df1c6ab8f428" containerID="4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d" exitCode=0 Nov 24 17:19:43 crc kubenswrapper[4760]: I1124 17:19:43.539125 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" event={"ID":"619e83e9-c912-4f56-832c-df1c6ab8f428","Type":"ContainerDied","Data":"4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d"} Nov 24 17:19:43 crc kubenswrapper[4760]: I1124 17:19:43.539568 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" event={"ID":"619e83e9-c912-4f56-832c-df1c6ab8f428","Type":"ContainerStarted","Data":"0d90dc255a4087e8cb91b47843b1b6e22df89de65e105151abea54ce203e42d0"} Nov 24 17:19:44 crc kubenswrapper[4760]: I1124 17:19:44.549762 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" event={"ID":"619e83e9-c912-4f56-832c-df1c6ab8f428","Type":"ContainerStarted","Data":"d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b"} Nov 24 17:19:44 crc kubenswrapper[4760]: I1124 17:19:44.555217 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:44 crc kubenswrapper[4760]: I1124 17:19:44.560160 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"9d373dfd203999cd22f7bf7d69d9e831837f14800d87f8667d0dd44d9fa2c987"} Nov 24 17:19:44 crc kubenswrapper[4760]: I1124 17:19:44.560388 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"b408ab9454135aa48d4c718128ebba6b492d0c2ff4037561413adc9cee3a4562"} Nov 24 17:19:44 crc kubenswrapper[4760]: I1124 17:19:44.560517 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"3ddcefb7a13ff38a4ba510a7cfa6683f2b24094ac25b94921cf91333e6ee47ef"} Nov 24 17:19:44 crc kubenswrapper[4760]: I1124 17:19:44.560616 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"82a6878f30fc979b8130f8e2030ac486a1d2143875c29981dc521acb53baf164"} Nov 24 17:19:44 crc kubenswrapper[4760]: I1124 17:19:44.584281 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" podStartSLOduration=5.584262471 podStartE2EDuration="5.584262471s" podCreationTimestamp="2025-11-24 17:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-24 17:19:44.57936128 +0000 UTC m=+979.902242840" watchObservedRunningTime="2025-11-24 17:19:44.584262471 +0000 UTC m=+979.907144021" Nov 24 17:19:46 crc kubenswrapper[4760]: I1124 17:19:46.583213 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"5bf0e23888a23ab3ba329912408b2fb40647385539817bcc5997c6851a1b8242"} Nov 24 17:19:46 crc kubenswrapper[4760]: I1124 17:19:46.584952 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"90b253a721a80245f9af811541d54407cbc8ced914825a6c17cbb69ad99d56db"} Nov 24 17:19:46 crc kubenswrapper[4760]: I1124 17:19:46.585451 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"d9a80adc22387d056d1c197561dfd37e7fe06bebd9936f8d72fe31eb2bcb4153"} Nov 24 17:19:46 crc kubenswrapper[4760]: I1124 17:19:46.585553 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"0fbd1be53b06344e68e64672b23446a1183c472ed8a98699531086874b5e5045"} Nov 24 17:19:47 crc kubenswrapper[4760]: I1124 17:19:47.599052 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"365170d556bc26d050c012434d075d5a7f311379b6fbe4c8158228c61c573a63"} Nov 24 17:19:47 crc kubenswrapper[4760]: I1124 17:19:47.599316 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"c1d895cc48f53b0a0d46a98b9ead099c48f7a66d29ee5db6179d5a6d173c7835"} Nov 24 17:19:47 crc kubenswrapper[4760]: I1124 17:19:47.599325 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8b420e33-0bf9-4d88-b33e-b5ba674ea4d9","Type":"ContainerStarted","Data":"3134cc8d94fe646f05ef82706b8bbf29d24b09b115760e2a84c59f7f71f1301b"} Nov 24 17:19:47 crc kubenswrapper[4760]: I1124 17:19:47.656682 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=36.774612488 podStartE2EDuration="44.656650479s" podCreationTimestamp="2025-11-24 17:19:03 +0000 UTC" firstStartedPulling="2025-11-24 17:19:37.529356498 +0000 UTC m=+972.852238048" lastFinishedPulling="2025-11-24 17:19:45.411394499 +0000 UTC m=+980.734276039" observedRunningTime="2025-11-24 17:19:47.644687747 +0000 UTC m=+982.967569307" watchObservedRunningTime="2025-11-24 17:19:47.656650479 +0000 UTC m=+982.979532069" Nov 24 17:19:47 crc kubenswrapper[4760]: I1124 17:19:47.933498 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-kh2bh"] Nov 24 17:19:47 crc kubenswrapper[4760]: I1124 17:19:47.933692 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" podUID="619e83e9-c912-4f56-832c-df1c6ab8f428" containerName="dnsmasq-dns" containerID="cri-o://d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b" gracePeriod=10 Nov 24 17:19:47 crc kubenswrapper[4760]: I1124 17:19:47.977746 4760 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/dnsmasq-dns-74f6bcbc87-rlzqt"] Nov 24 17:19:47 crc kubenswrapper[4760]: I1124 17:19:47.979352 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:47 crc kubenswrapper[4760]: I1124 17:19:47.982172 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 24 17:19:47 crc kubenswrapper[4760]: I1124 17:19:47.989598 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-rlzqt"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.111185 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.122370 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgxn8\" (UniqueName: \"kubernetes.io/projected/783a6396-635c-42d2-87b2-3c66d6b2bec0-kube-api-access-bgxn8\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.122439 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-config\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.122475 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.122545 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.122779 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.122872 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.225871 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 
24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.225950 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.226035 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgxn8\" (UniqueName: \"kubernetes.io/projected/783a6396-635c-42d2-87b2-3c66d6b2bec0-kube-api-access-bgxn8\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.226065 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-config\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.226089 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.226184 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.226856 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.226859 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.227485 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.227764 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-config\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.228305 4760 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.247806 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgxn8\" (UniqueName: \"kubernetes.io/projected/783a6396-635c-42d2-87b2-3c66d6b2bec0-kube-api-access-bgxn8\") pod \"dnsmasq-dns-74f6bcbc87-rlzqt\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") " pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.320434 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.427398 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-8cjjt"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.428471 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8cjjt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.443643 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-8cjjt"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.523739 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-tfxz8"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.524844 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-tfxz8" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.529131 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.530854 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e76c2370-1f1c-4336-a95b-c5d11492ebe6-operator-scripts\") pod \"cinder-db-create-8cjjt\" (UID: \"e76c2370-1f1c-4336-a95b-c5d11492ebe6\") " pod="openstack/cinder-db-create-8cjjt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.530932 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96s2h\" (UniqueName: \"kubernetes.io/projected/e76c2370-1f1c-4336-a95b-c5d11492ebe6-kube-api-access-96s2h\") pod \"cinder-db-create-8cjjt\" (UID: \"e76c2370-1f1c-4336-a95b-c5d11492ebe6\") " pod="openstack/cinder-db-create-8cjjt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.536524 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-400b-account-create-cb447"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.537826 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-400b-account-create-cb447" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.540085 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.555976 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-tfxz8"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.562629 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-400b-account-create-cb447"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.588558 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.621115 4760 generic.go:334] "Generic (PLEG): container finished" podID="619e83e9-c912-4f56-832c-df1c6ab8f428" containerID="d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b" exitCode=0 Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.621157 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.621254 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" event={"ID":"619e83e9-c912-4f56-832c-df1c6ab8f428","Type":"ContainerDied","Data":"d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b"} Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.621279 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-kh2bh" event={"ID":"619e83e9-c912-4f56-832c-df1c6ab8f428","Type":"ContainerDied","Data":"0d90dc255a4087e8cb91b47843b1b6e22df89de65e105151abea54ce203e42d0"} Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.621296 4760 scope.go:117] "RemoveContainer" containerID="d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.633063 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-b2cd-account-create-vmd78"] Nov 24 17:19:48 crc kubenswrapper[4760]: E1124 17:19:48.633460 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="619e83e9-c912-4f56-832c-df1c6ab8f428" containerName="dnsmasq-dns" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.633475 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="619e83e9-c912-4f56-832c-df1c6ab8f428" containerName="dnsmasq-dns" Nov 24 17:19:48 crc kubenswrapper[4760]: E1124 17:19:48.633507 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="619e83e9-c912-4f56-832c-df1c6ab8f428" containerName="init" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.633515 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="619e83e9-c912-4f56-832c-df1c6ab8f428" containerName="init" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.633672 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="619e83e9-c912-4f56-832c-df1c6ab8f428" containerName="dnsmasq-dns" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.634240 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-b2cd-account-create-vmd78" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635112 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-dns-svc\") pod \"619e83e9-c912-4f56-832c-df1c6ab8f428\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635178 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-sb\") pod \"619e83e9-c912-4f56-832c-df1c6ab8f428\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635209 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-nb\") pod \"619e83e9-c912-4f56-832c-df1c6ab8f428\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635291 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-config\") pod \"619e83e9-c912-4f56-832c-df1c6ab8f428\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635322 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52n2x\" (UniqueName: \"kubernetes.io/projected/619e83e9-c912-4f56-832c-df1c6ab8f428-kube-api-access-52n2x\") pod \"619e83e9-c912-4f56-832c-df1c6ab8f428\" (UID: \"619e83e9-c912-4f56-832c-df1c6ab8f428\") " Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635631 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-operator-scripts\") pod \"barbican-400b-account-create-cb447\" (UID: \"c57550e3-ccb2-47ac-bd84-1ed2f7eef985\") " pod="openstack/barbican-400b-account-create-cb447" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635663 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96s2h\" (UniqueName: \"kubernetes.io/projected/e76c2370-1f1c-4336-a95b-c5d11492ebe6-kube-api-access-96s2h\") pod \"cinder-db-create-8cjjt\" (UID: \"e76c2370-1f1c-4336-a95b-c5d11492ebe6\") " pod="openstack/cinder-db-create-8cjjt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635694 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2cwm\" (UniqueName: \"kubernetes.io/projected/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-kube-api-access-w2cwm\") pod \"barbican-400b-account-create-cb447\" (UID: \"c57550e3-ccb2-47ac-bd84-1ed2f7eef985\") " pod="openstack/barbican-400b-account-create-cb447" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635770 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f07cffba-dc3b-495a-a378-135d2b830d7a-operator-scripts\") pod \"barbican-db-create-tfxz8\" (UID: \"f07cffba-dc3b-495a-a378-135d2b830d7a\") " pod="openstack/barbican-db-create-tfxz8" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635834 4760 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czz8r\" (UniqueName: \"kubernetes.io/projected/f07cffba-dc3b-495a-a378-135d2b830d7a-kube-api-access-czz8r\") pod \"barbican-db-create-tfxz8\" (UID: \"f07cffba-dc3b-495a-a378-135d2b830d7a\") " pod="openstack/barbican-db-create-tfxz8" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.635869 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e76c2370-1f1c-4336-a95b-c5d11492ebe6-operator-scripts\") pod \"cinder-db-create-8cjjt\" (UID: \"e76c2370-1f1c-4336-a95b-c5d11492ebe6\") " pod="openstack/cinder-db-create-8cjjt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.636985 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e76c2370-1f1c-4336-a95b-c5d11492ebe6-operator-scripts\") pod \"cinder-db-create-8cjjt\" (UID: \"e76c2370-1f1c-4336-a95b-c5d11492ebe6\") " pod="openstack/cinder-db-create-8cjjt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.644738 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.648058 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/619e83e9-c912-4f56-832c-df1c6ab8f428-kube-api-access-52n2x" (OuterVolumeSpecName: "kube-api-access-52n2x") pod "619e83e9-c912-4f56-832c-df1c6ab8f428" (UID: "619e83e9-c912-4f56-832c-df1c6ab8f428"). InnerVolumeSpecName "kube-api-access-52n2x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.672353 4760 scope.go:117] "RemoveContainer" containerID="4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.715157 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-b2cd-account-create-vmd78"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.731266 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96s2h\" (UniqueName: \"kubernetes.io/projected/e76c2370-1f1c-4336-a95b-c5d11492ebe6-kube-api-access-96s2h\") pod \"cinder-db-create-8cjjt\" (UID: \"e76c2370-1f1c-4336-a95b-c5d11492ebe6\") " pod="openstack/cinder-db-create-8cjjt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.738157 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f07cffba-dc3b-495a-a378-135d2b830d7a-operator-scripts\") pod \"barbican-db-create-tfxz8\" (UID: \"f07cffba-dc3b-495a-a378-135d2b830d7a\") " pod="openstack/barbican-db-create-tfxz8" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.738286 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-operator-scripts\") pod \"cinder-b2cd-account-create-vmd78\" (UID: \"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398\") " pod="openstack/cinder-b2cd-account-create-vmd78" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.738396 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czz8r\" (UniqueName: \"kubernetes.io/projected/f07cffba-dc3b-495a-a378-135d2b830d7a-kube-api-access-czz8r\") pod 
\"barbican-db-create-tfxz8\" (UID: \"f07cffba-dc3b-495a-a378-135d2b830d7a\") " pod="openstack/barbican-db-create-tfxz8" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.738505 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-operator-scripts\") pod \"barbican-400b-account-create-cb447\" (UID: \"c57550e3-ccb2-47ac-bd84-1ed2f7eef985\") " pod="openstack/barbican-400b-account-create-cb447" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.738592 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2cwm\" (UniqueName: \"kubernetes.io/projected/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-kube-api-access-w2cwm\") pod \"barbican-400b-account-create-cb447\" (UID: \"c57550e3-ccb2-47ac-bd84-1ed2f7eef985\") " pod="openstack/barbican-400b-account-create-cb447" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.738701 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kprdd\" (UniqueName: \"kubernetes.io/projected/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-kube-api-access-kprdd\") pod \"cinder-b2cd-account-create-vmd78\" (UID: \"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398\") " pod="openstack/cinder-b2cd-account-create-vmd78" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.738862 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52n2x\" (UniqueName: \"kubernetes.io/projected/619e83e9-c912-4f56-832c-df1c6ab8f428-kube-api-access-52n2x\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.738778 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f07cffba-dc3b-495a-a378-135d2b830d7a-operator-scripts\") pod \"barbican-db-create-tfxz8\" (UID: \"f07cffba-dc3b-495a-a378-135d2b830d7a\") " pod="openstack/barbican-db-create-tfxz8" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.739198 4760 scope.go:117] "RemoveContainer" containerID="d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.739264 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-operator-scripts\") pod \"barbican-400b-account-create-cb447\" (UID: \"c57550e3-ccb2-47ac-bd84-1ed2f7eef985\") " pod="openstack/barbican-400b-account-create-cb447" Nov 24 17:19:48 crc kubenswrapper[4760]: E1124 17:19:48.740948 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b\": container with ID starting with d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b not found: ID does not exist" containerID="d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.740982 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b"} err="failed to get container status \"d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b\": rpc error: code = NotFound desc = could not find container \"d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b\": container with ID 
starting with d9935da1249edfe18d74bd25b8c0ef6d3c14d1ac99755c18f5999791e9e4093b not found: ID does not exist" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.741022 4760 scope.go:117] "RemoveContainer" containerID="4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d" Nov 24 17:19:48 crc kubenswrapper[4760]: E1124 17:19:48.741561 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d\": container with ID starting with 4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d not found: ID does not exist" containerID="4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.741606 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d"} err="failed to get container status \"4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d\": rpc error: code = NotFound desc = could not find container \"4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d\": container with ID starting with 4bc56b83682d55c6d35c88d935d7ba76c6919b30f4d84fd2e6915529e0925b1d not found: ID does not exist" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.745480 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "619e83e9-c912-4f56-832c-df1c6ab8f428" (UID: "619e83e9-c912-4f56-832c-df1c6ab8f428"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.752320 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-config" (OuterVolumeSpecName: "config") pod "619e83e9-c912-4f56-832c-df1c6ab8f428" (UID: "619e83e9-c912-4f56-832c-df1c6ab8f428"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.757178 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "619e83e9-c912-4f56-832c-df1c6ab8f428" (UID: "619e83e9-c912-4f56-832c-df1c6ab8f428"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.770443 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "619e83e9-c912-4f56-832c-df1c6ab8f428" (UID: "619e83e9-c912-4f56-832c-df1c6ab8f428"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.770801 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-8cjjt" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.771591 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2cwm\" (UniqueName: \"kubernetes.io/projected/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-kube-api-access-w2cwm\") pod \"barbican-400b-account-create-cb447\" (UID: \"c57550e3-ccb2-47ac-bd84-1ed2f7eef985\") " pod="openstack/barbican-400b-account-create-cb447" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.773023 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czz8r\" (UniqueName: \"kubernetes.io/projected/f07cffba-dc3b-495a-a378-135d2b830d7a-kube-api-access-czz8r\") pod \"barbican-db-create-tfxz8\" (UID: \"f07cffba-dc3b-495a-a378-135d2b830d7a\") " pod="openstack/barbican-db-create-tfxz8" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.799557 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-pbkv7"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.800938 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.803625 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.804085 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.804248 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t62xm" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.812183 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-pbkv7"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.813632 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.824207 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-8s242"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.825702 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-8s242" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.838419 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-8s242"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840018 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-config-data\") pod \"keystone-db-sync-pbkv7\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") " pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840116 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-operator-scripts\") pod \"neutron-db-create-8s242\" (UID: \"695177f8-fcf1-4cdd-8d7a-b6ae266fe224\") " pod="openstack/neutron-db-create-8s242" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840168 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vl58\" (UniqueName: \"kubernetes.io/projected/e4698227-1751-478c-8996-63502f8c74da-kube-api-access-8vl58\") pod \"keystone-db-sync-pbkv7\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") " pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840213 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kprdd\" (UniqueName: \"kubernetes.io/projected/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-kube-api-access-kprdd\") pod \"cinder-b2cd-account-create-vmd78\" (UID: \"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398\") " pod="openstack/cinder-b2cd-account-create-vmd78" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840242 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-combined-ca-bundle\") pod \"keystone-db-sync-pbkv7\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") " pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840317 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64vn6\" (UniqueName: \"kubernetes.io/projected/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-kube-api-access-64vn6\") pod \"neutron-db-create-8s242\" (UID: \"695177f8-fcf1-4cdd-8d7a-b6ae266fe224\") " pod="openstack/neutron-db-create-8s242" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840360 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-operator-scripts\") pod \"cinder-b2cd-account-create-vmd78\" (UID: \"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398\") " pod="openstack/cinder-b2cd-account-create-vmd78" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840463 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840486 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-dns-svc\") on node \"crc\" DevicePath \"\"" 
Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840500 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.840515 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/619e83e9-c912-4f56-832c-df1c6ab8f428-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.841083 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-operator-scripts\") pod \"cinder-b2cd-account-create-vmd78\" (UID: \"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398\") " pod="openstack/cinder-b2cd-account-create-vmd78" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.860262 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kprdd\" (UniqueName: \"kubernetes.io/projected/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-kube-api-access-kprdd\") pod \"cinder-b2cd-account-create-vmd78\" (UID: \"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398\") " pod="openstack/cinder-b2cd-account-create-vmd78" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.907379 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-tfxz8" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.910548 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-ce5c-account-create-6bcw4"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.911497 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ce5c-account-create-6bcw4" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.913572 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.923762 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-400b-account-create-cb447" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.925847 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ce5c-account-create-6bcw4"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.941524 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-config-data\") pod \"keystone-db-sync-pbkv7\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") " pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.942284 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-operator-scripts\") pod \"neutron-db-create-8s242\" (UID: \"695177f8-fcf1-4cdd-8d7a-b6ae266fe224\") " pod="openstack/neutron-db-create-8s242" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.942336 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vl58\" (UniqueName: \"kubernetes.io/projected/e4698227-1751-478c-8996-63502f8c74da-kube-api-access-8vl58\") pod \"keystone-db-sync-pbkv7\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") " pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.942373 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-combined-ca-bundle\") pod \"keystone-db-sync-pbkv7\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") " pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.942403 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33401460-35b1-40fd-8bf4-3b0e3d7cba89-operator-scripts\") pod \"neutron-ce5c-account-create-6bcw4\" (UID: \"33401460-35b1-40fd-8bf4-3b0e3d7cba89\") " pod="openstack/neutron-ce5c-account-create-6bcw4" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.942434 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64vn6\" (UniqueName: \"kubernetes.io/projected/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-kube-api-access-64vn6\") pod \"neutron-db-create-8s242\" (UID: \"695177f8-fcf1-4cdd-8d7a-b6ae266fe224\") " pod="openstack/neutron-db-create-8s242" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.942469 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4vws\" (UniqueName: \"kubernetes.io/projected/33401460-35b1-40fd-8bf4-3b0e3d7cba89-kube-api-access-r4vws\") pod \"neutron-ce5c-account-create-6bcw4\" (UID: \"33401460-35b1-40fd-8bf4-3b0e3d7cba89\") " pod="openstack/neutron-ce5c-account-create-6bcw4" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.943566 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-operator-scripts\") pod \"neutron-db-create-8s242\" (UID: \"695177f8-fcf1-4cdd-8d7a-b6ae266fe224\") " pod="openstack/neutron-db-create-8s242" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.948345 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-config-data\") pod \"keystone-db-sync-pbkv7\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") " pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.954075 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-combined-ca-bundle\") pod \"keystone-db-sync-pbkv7\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") " pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.961724 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-kh2bh"] Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.968932 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vl58\" (UniqueName: \"kubernetes.io/projected/e4698227-1751-478c-8996-63502f8c74da-kube-api-access-8vl58\") pod \"keystone-db-sync-pbkv7\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") " pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.974473 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64vn6\" (UniqueName: \"kubernetes.io/projected/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-kube-api-access-64vn6\") pod \"neutron-db-create-8s242\" (UID: \"695177f8-fcf1-4cdd-8d7a-b6ae266fe224\") " pod="openstack/neutron-db-create-8s242" Nov 24 17:19:48 crc kubenswrapper[4760]: I1124 17:19:48.979828 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-kh2bh"] Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.005528 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-b2cd-account-create-vmd78" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.032250 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-rlzqt"] Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.048263 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33401460-35b1-40fd-8bf4-3b0e3d7cba89-operator-scripts\") pod \"neutron-ce5c-account-create-6bcw4\" (UID: \"33401460-35b1-40fd-8bf4-3b0e3d7cba89\") " pod="openstack/neutron-ce5c-account-create-6bcw4" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.048306 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4vws\" (UniqueName: \"kubernetes.io/projected/33401460-35b1-40fd-8bf4-3b0e3d7cba89-kube-api-access-r4vws\") pod \"neutron-ce5c-account-create-6bcw4\" (UID: \"33401460-35b1-40fd-8bf4-3b0e3d7cba89\") " pod="openstack/neutron-ce5c-account-create-6bcw4" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.049298 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33401460-35b1-40fd-8bf4-3b0e3d7cba89-operator-scripts\") pod \"neutron-ce5c-account-create-6bcw4\" (UID: \"33401460-35b1-40fd-8bf4-3b0e3d7cba89\") " pod="openstack/neutron-ce5c-account-create-6bcw4" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.065549 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4vws\" (UniqueName: \"kubernetes.io/projected/33401460-35b1-40fd-8bf4-3b0e3d7cba89-kube-api-access-r4vws\") pod \"neutron-ce5c-account-create-6bcw4\" (UID: \"33401460-35b1-40fd-8bf4-3b0e3d7cba89\") " pod="openstack/neutron-ce5c-account-create-6bcw4" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.123366 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.153602 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8s242" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.218795 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-8cjjt"] Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.240863 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-ce5c-account-create-6bcw4" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.262675 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-tfxz8"] Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.481853 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="619e83e9-c912-4f56-832c-df1c6ab8f428" path="/var/lib/kubelet/pods/619e83e9-c912-4f56-832c-df1c6ab8f428/volumes" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.524723 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-400b-account-create-cb447"] Nov 24 17:19:49 crc kubenswrapper[4760]: W1124 17:19:49.531180 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc57550e3_ccb2_47ac_bd84_1ed2f7eef985.slice/crio-0ecec0f4be6d237115afaf4bcb4d64e101f02b1bcf5aa3a04db1fbdec18e6f7d WatchSource:0}: Error finding container 0ecec0f4be6d237115afaf4bcb4d64e101f02b1bcf5aa3a04db1fbdec18e6f7d: Status 404 returned error can't find the container with id 0ecec0f4be6d237115afaf4bcb4d64e101f02b1bcf5aa3a04db1fbdec18e6f7d Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.625746 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-b2cd-account-create-vmd78"] Nov 24 17:19:49 crc kubenswrapper[4760]: W1124 17:19:49.644797 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee83fcdf_cf1d_4e1a_9d95_c9b521cfc398.slice/crio-f6a91bb58246a0dd9f67039f01d77cae75a5f693ebee281726f1696ac20b5b3a WatchSource:0}: Error finding container f6a91bb58246a0dd9f67039f01d77cae75a5f693ebee281726f1696ac20b5b3a: Status 404 returned error can't find the container with id f6a91bb58246a0dd9f67039f01d77cae75a5f693ebee281726f1696ac20b5b3a Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.646316 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8cjjt" event={"ID":"e76c2370-1f1c-4336-a95b-c5d11492ebe6","Type":"ContainerStarted","Data":"b449ad24ce613c6512a596bef84f34cff188e37587d6cbeacd0d4cf54b19f22f"} Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.646348 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8cjjt" event={"ID":"e76c2370-1f1c-4336-a95b-c5d11492ebe6","Type":"ContainerStarted","Data":"759a6851f625fb1f6f35937c8fbd7293a98da30b05e647ffb02103a10d3ce3ce"} Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.654348 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-tfxz8" event={"ID":"f07cffba-dc3b-495a-a378-135d2b830d7a","Type":"ContainerStarted","Data":"63254f79d27ee8428f886351c1a7412eb58275ec63e328ee4e1741fd902b47d6"} Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.654380 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-tfxz8" event={"ID":"f07cffba-dc3b-495a-a378-135d2b830d7a","Type":"ContainerStarted","Data":"4bcb8f3ea20ecad17022124228304b29ea615bf002f36b82911f6aecd9ba8637"} Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.668292 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-8cjjt" podStartSLOduration=1.6682764799999998 podStartE2EDuration="1.66827648s" podCreationTimestamp="2025-11-24 17:19:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-24 17:19:49.660819606 +0000 UTC m=+984.983701156" watchObservedRunningTime="2025-11-24 17:19:49.66827648 +0000 UTC m=+984.991158030" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.669516 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-400b-account-create-cb447" event={"ID":"c57550e3-ccb2-47ac-bd84-1ed2f7eef985","Type":"ContainerStarted","Data":"0ecec0f4be6d237115afaf4bcb4d64e101f02b1bcf5aa3a04db1fbdec18e6f7d"} Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.675517 4760 generic.go:334] "Generic (PLEG): container finished" podID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerID="a58b25e00e07ff29add1b0379a67278dc07b9c1630bbccdb1f5924c9700af55d" exitCode=0 Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.675560 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" event={"ID":"783a6396-635c-42d2-87b2-3c66d6b2bec0","Type":"ContainerDied","Data":"a58b25e00e07ff29add1b0379a67278dc07b9c1630bbccdb1f5924c9700af55d"} Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.675585 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" event={"ID":"783a6396-635c-42d2-87b2-3c66d6b2bec0","Type":"ContainerStarted","Data":"f7883474d480dd996f29a191ed8f040afad2696f562886e41bc2dc7cb240fe62"} Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.684711 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-tfxz8" podStartSLOduration=1.6846957200000001 podStartE2EDuration="1.68469572s" podCreationTimestamp="2025-11-24 17:19:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:19:49.679016888 +0000 UTC m=+985.001898438" watchObservedRunningTime="2025-11-24 17:19:49.68469572 +0000 UTC m=+985.007577270" Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.768841 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-pbkv7"] Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.784421 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-8s242"] Nov 24 17:19:49 crc kubenswrapper[4760]: W1124 17:19:49.855807 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod695177f8_fcf1_4cdd_8d7a_b6ae266fe224.slice/crio-186b4a538c3abae1839fd77d1a9b06d1b07fa9e0ac09f1a7aefab99576f1f826 WatchSource:0}: Error finding container 186b4a538c3abae1839fd77d1a9b06d1b07fa9e0ac09f1a7aefab99576f1f826: Status 404 returned error can't find the container with id 186b4a538c3abae1839fd77d1a9b06d1b07fa9e0ac09f1a7aefab99576f1f826 Nov 24 17:19:49 crc kubenswrapper[4760]: W1124 17:19:49.856946 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4698227_1751_478c_8996_63502f8c74da.slice/crio-19084a6ab15832fc5df71ce7c4c5cb35be0e2d6d1baf4e2d666a94214e217305 WatchSource:0}: Error finding container 19084a6ab15832fc5df71ce7c4c5cb35be0e2d6d1baf4e2d666a94214e217305: Status 404 returned error can't find the container with id 19084a6ab15832fc5df71ce7c4c5cb35be0e2d6d1baf4e2d666a94214e217305 Nov 24 17:19:49 crc kubenswrapper[4760]: I1124 17:19:49.883261 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ce5c-account-create-6bcw4"] Nov 24 17:19:49 crc kubenswrapper[4760]: W1124 
17:19:49.933524 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33401460_35b1_40fd_8bf4_3b0e3d7cba89.slice/crio-091d691e8a0ac9c1a033e65d88cb822412dd605ca51dd0f659fe040680cdc14e WatchSource:0}: Error finding container 091d691e8a0ac9c1a033e65d88cb822412dd605ca51dd0f659fe040680cdc14e: Status 404 returned error can't find the container with id 091d691e8a0ac9c1a033e65d88cb822412dd605ca51dd0f659fe040680cdc14e Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.683897 4760 generic.go:334] "Generic (PLEG): container finished" podID="c57550e3-ccb2-47ac-bd84-1ed2f7eef985" containerID="7c52f1e0579377f97f88ca8d66897c10c040d3d51fdca3dc7abf1d75624b8697" exitCode=0 Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.684116 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-400b-account-create-cb447" event={"ID":"c57550e3-ccb2-47ac-bd84-1ed2f7eef985","Type":"ContainerDied","Data":"7c52f1e0579377f97f88ca8d66897c10c040d3d51fdca3dc7abf1d75624b8697"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.686815 4760 generic.go:334] "Generic (PLEG): container finished" podID="ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398" containerID="7fedc2bc56f4571d58f77b6b748288010857142ba3b4d8aeb33ac14eb8fd03f1" exitCode=0 Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.686869 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b2cd-account-create-vmd78" event={"ID":"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398","Type":"ContainerDied","Data":"7fedc2bc56f4571d58f77b6b748288010857142ba3b4d8aeb33ac14eb8fd03f1"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.686893 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b2cd-account-create-vmd78" event={"ID":"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398","Type":"ContainerStarted","Data":"f6a91bb58246a0dd9f67039f01d77cae75a5f693ebee281726f1696ac20b5b3a"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.689599 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" event={"ID":"783a6396-635c-42d2-87b2-3c66d6b2bec0","Type":"ContainerStarted","Data":"0410d5251a6c6b2c9d450530fd6b105b023450f7cc1a05f86a666dd233bb0333"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.690334 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.696632 4760 generic.go:334] "Generic (PLEG): container finished" podID="e76c2370-1f1c-4336-a95b-c5d11492ebe6" containerID="b449ad24ce613c6512a596bef84f34cff188e37587d6cbeacd0d4cf54b19f22f" exitCode=0 Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.696688 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8cjjt" event={"ID":"e76c2370-1f1c-4336-a95b-c5d11492ebe6","Type":"ContainerDied","Data":"b449ad24ce613c6512a596bef84f34cff188e37587d6cbeacd0d4cf54b19f22f"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.698683 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pbkv7" event={"ID":"e4698227-1751-478c-8996-63502f8c74da","Type":"ContainerStarted","Data":"19084a6ab15832fc5df71ce7c4c5cb35be0e2d6d1baf4e2d666a94214e217305"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.710036 4760 generic.go:334] "Generic (PLEG): container finished" podID="33401460-35b1-40fd-8bf4-3b0e3d7cba89" 
containerID="9e21020274d85f15b71e7507ea3468d947ea60485bb11a30dd53aeae27d2f58a" exitCode=0 Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.710159 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ce5c-account-create-6bcw4" event={"ID":"33401460-35b1-40fd-8bf4-3b0e3d7cba89","Type":"ContainerDied","Data":"9e21020274d85f15b71e7507ea3468d947ea60485bb11a30dd53aeae27d2f58a"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.710210 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ce5c-account-create-6bcw4" event={"ID":"33401460-35b1-40fd-8bf4-3b0e3d7cba89","Type":"ContainerStarted","Data":"091d691e8a0ac9c1a033e65d88cb822412dd605ca51dd0f659fe040680cdc14e"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.716667 4760 generic.go:334] "Generic (PLEG): container finished" podID="f07cffba-dc3b-495a-a378-135d2b830d7a" containerID="63254f79d27ee8428f886351c1a7412eb58275ec63e328ee4e1741fd902b47d6" exitCode=0 Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.716882 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-tfxz8" event={"ID":"f07cffba-dc3b-495a-a378-135d2b830d7a","Type":"ContainerDied","Data":"63254f79d27ee8428f886351c1a7412eb58275ec63e328ee4e1741fd902b47d6"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.719279 4760 generic.go:334] "Generic (PLEG): container finished" podID="695177f8-fcf1-4cdd-8d7a-b6ae266fe224" containerID="5e3d83940ec044a41c5fac47c8418bf678378ea338c0cbc4bdafe5e2ff6cde0b" exitCode=0 Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.719321 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8s242" event={"ID":"695177f8-fcf1-4cdd-8d7a-b6ae266fe224","Type":"ContainerDied","Data":"5e3d83940ec044a41c5fac47c8418bf678378ea338c0cbc4bdafe5e2ff6cde0b"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.719344 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8s242" event={"ID":"695177f8-fcf1-4cdd-8d7a-b6ae266fe224","Type":"ContainerStarted","Data":"186b4a538c3abae1839fd77d1a9b06d1b07fa9e0ac09f1a7aefab99576f1f826"} Nov 24 17:19:50 crc kubenswrapper[4760]: I1124 17:19:50.747628 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" podStartSLOduration=3.74760862 podStartE2EDuration="3.74760862s" podCreationTimestamp="2025-11-24 17:19:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:19:50.745190171 +0000 UTC m=+986.068071721" watchObservedRunningTime="2025-11-24 17:19:50.74760862 +0000 UTC m=+986.070490170" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.765373 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ce5c-account-create-6bcw4" event={"ID":"33401460-35b1-40fd-8bf4-3b0e3d7cba89","Type":"ContainerDied","Data":"091d691e8a0ac9c1a033e65d88cb822412dd605ca51dd0f659fe040680cdc14e"} Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.766118 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="091d691e8a0ac9c1a033e65d88cb822412dd605ca51dd0f659fe040680cdc14e" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.767409 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-tfxz8" 
event={"ID":"f07cffba-dc3b-495a-a378-135d2b830d7a","Type":"ContainerDied","Data":"4bcb8f3ea20ecad17022124228304b29ea615bf002f36b82911f6aecd9ba8637"} Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.767443 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4bcb8f3ea20ecad17022124228304b29ea615bf002f36b82911f6aecd9ba8637" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.768991 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8s242" event={"ID":"695177f8-fcf1-4cdd-8d7a-b6ae266fe224","Type":"ContainerDied","Data":"186b4a538c3abae1839fd77d1a9b06d1b07fa9e0ac09f1a7aefab99576f1f826"} Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.769058 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="186b4a538c3abae1839fd77d1a9b06d1b07fa9e0ac09f1a7aefab99576f1f826" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.771556 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-400b-account-create-cb447" event={"ID":"c57550e3-ccb2-47ac-bd84-1ed2f7eef985","Type":"ContainerDied","Data":"0ecec0f4be6d237115afaf4bcb4d64e101f02b1bcf5aa3a04db1fbdec18e6f7d"} Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.771580 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ecec0f4be6d237115afaf4bcb4d64e101f02b1bcf5aa3a04db1fbdec18e6f7d" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.773806 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b2cd-account-create-vmd78" event={"ID":"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398","Type":"ContainerDied","Data":"f6a91bb58246a0dd9f67039f01d77cae75a5f693ebee281726f1696ac20b5b3a"} Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.773833 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6a91bb58246a0dd9f67039f01d77cae75a5f693ebee281726f1696ac20b5b3a" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.775453 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8cjjt" event={"ID":"e76c2370-1f1c-4336-a95b-c5d11492ebe6","Type":"ContainerDied","Data":"759a6851f625fb1f6f35937c8fbd7293a98da30b05e647ffb02103a10d3ce3ce"} Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.775481 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="759a6851f625fb1f6f35937c8fbd7293a98da30b05e647ffb02103a10d3ce3ce" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.795441 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-8cjjt" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.854217 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96s2h\" (UniqueName: \"kubernetes.io/projected/e76c2370-1f1c-4336-a95b-c5d11492ebe6-kube-api-access-96s2h\") pod \"e76c2370-1f1c-4336-a95b-c5d11492ebe6\" (UID: \"e76c2370-1f1c-4336-a95b-c5d11492ebe6\") " Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.854284 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e76c2370-1f1c-4336-a95b-c5d11492ebe6-operator-scripts\") pod \"e76c2370-1f1c-4336-a95b-c5d11492ebe6\" (UID: \"e76c2370-1f1c-4336-a95b-c5d11492ebe6\") " Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.854776 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e76c2370-1f1c-4336-a95b-c5d11492ebe6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e76c2370-1f1c-4336-a95b-c5d11492ebe6" (UID: "e76c2370-1f1c-4336-a95b-c5d11492ebe6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.855571 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e76c2370-1f1c-4336-a95b-c5d11492ebe6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.857786 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-tfxz8" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.859845 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e76c2370-1f1c-4336-a95b-c5d11492ebe6-kube-api-access-96s2h" (OuterVolumeSpecName: "kube-api-access-96s2h") pod "e76c2370-1f1c-4336-a95b-c5d11492ebe6" (UID: "e76c2370-1f1c-4336-a95b-c5d11492ebe6"). InnerVolumeSpecName "kube-api-access-96s2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.919743 4760 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.957204    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f07cffba-dc3b-495a-a378-135d2b830d7a-operator-scripts\") pod \"f07cffba-dc3b-495a-a378-135d2b830d7a\" (UID: \"f07cffba-dc3b-495a-a378-135d2b830d7a\") "
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.957322    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64vn6\" (UniqueName: \"kubernetes.io/projected/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-kube-api-access-64vn6\") pod \"695177f8-fcf1-4cdd-8d7a-b6ae266fe224\" (UID: \"695177f8-fcf1-4cdd-8d7a-b6ae266fe224\") "
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.957383    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czz8r\" (UniqueName: \"kubernetes.io/projected/f07cffba-dc3b-495a-a378-135d2b830d7a-kube-api-access-czz8r\") pod \"f07cffba-dc3b-495a-a378-135d2b830d7a\" (UID: \"f07cffba-dc3b-495a-a378-135d2b830d7a\") "
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.957430    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-operator-scripts\") pod \"695177f8-fcf1-4cdd-8d7a-b6ae266fe224\" (UID: \"695177f8-fcf1-4cdd-8d7a-b6ae266fe224\") "
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.957739    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f07cffba-dc3b-495a-a378-135d2b830d7a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f07cffba-dc3b-495a-a378-135d2b830d7a" (UID: "f07cffba-dc3b-495a-a378-135d2b830d7a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.957897    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "695177f8-fcf1-4cdd-8d7a-b6ae266fe224" (UID: "695177f8-fcf1-4cdd-8d7a-b6ae266fe224"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.958128    4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f07cffba-dc3b-495a-a378-135d2b830d7a-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.958150    4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96s2h\" (UniqueName: \"kubernetes.io/projected/e76c2370-1f1c-4336-a95b-c5d11492ebe6-kube-api-access-96s2h\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.958162    4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.966167    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-kube-api-access-64vn6" (OuterVolumeSpecName: "kube-api-access-64vn6") pod "695177f8-fcf1-4cdd-8d7a-b6ae266fe224" (UID: "695177f8-fcf1-4cdd-8d7a-b6ae266fe224"). InnerVolumeSpecName "kube-api-access-64vn6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.966218    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f07cffba-dc3b-495a-a378-135d2b830d7a-kube-api-access-czz8r" (OuterVolumeSpecName: "kube-api-access-czz8r") pod "f07cffba-dc3b-495a-a378-135d2b830d7a" (UID: "f07cffba-dc3b-495a-a378-135d2b830d7a"). InnerVolumeSpecName "kube-api-access-czz8r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:19:54 crc kubenswrapper[4760]: I1124 17:19:54.970127    4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ce5c-account-create-6bcw4"
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.020786    4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-400b-account-create-cb447"
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.023483    4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-b2cd-account-create-vmd78"
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.058958    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4vws\" (UniqueName: \"kubernetes.io/projected/33401460-35b1-40fd-8bf4-3b0e3d7cba89-kube-api-access-r4vws\") pod \"33401460-35b1-40fd-8bf4-3b0e3d7cba89\" (UID: \"33401460-35b1-40fd-8bf4-3b0e3d7cba89\") "
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.059071    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33401460-35b1-40fd-8bf4-3b0e3d7cba89-operator-scripts\") pod \"33401460-35b1-40fd-8bf4-3b0e3d7cba89\" (UID: \"33401460-35b1-40fd-8bf4-3b0e3d7cba89\") "
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.059128    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kprdd\" (UniqueName: \"kubernetes.io/projected/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-kube-api-access-kprdd\") pod \"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398\" (UID: \"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398\") "
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.059199    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-operator-scripts\") pod \"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398\" (UID: \"ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398\") "
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.059237    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-operator-scripts\") pod \"c57550e3-ccb2-47ac-bd84-1ed2f7eef985\" (UID: \"c57550e3-ccb2-47ac-bd84-1ed2f7eef985\") "
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.059357    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2cwm\" (UniqueName: \"kubernetes.io/projected/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-kube-api-access-w2cwm\") pod \"c57550e3-ccb2-47ac-bd84-1ed2f7eef985\" (UID: \"c57550e3-ccb2-47ac-bd84-1ed2f7eef985\") "
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.060049    4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64vn6\" (UniqueName: \"kubernetes.io/projected/695177f8-fcf1-4cdd-8d7a-b6ae266fe224-kube-api-access-64vn6\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.060083    4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czz8r\" (UniqueName: \"kubernetes.io/projected/f07cffba-dc3b-495a-a378-135d2b830d7a-kube-api-access-czz8r\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.060258    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c57550e3-ccb2-47ac-bd84-1ed2f7eef985" (UID: "c57550e3-ccb2-47ac-bd84-1ed2f7eef985"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.060283    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398" (UID: "ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.060541    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33401460-35b1-40fd-8bf4-3b0e3d7cba89-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "33401460-35b1-40fd-8bf4-3b0e3d7cba89" (UID: "33401460-35b1-40fd-8bf4-3b0e3d7cba89"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.063781    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33401460-35b1-40fd-8bf4-3b0e3d7cba89-kube-api-access-r4vws" (OuterVolumeSpecName: "kube-api-access-r4vws") pod "33401460-35b1-40fd-8bf4-3b0e3d7cba89" (UID: "33401460-35b1-40fd-8bf4-3b0e3d7cba89"). InnerVolumeSpecName "kube-api-access-r4vws". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.063850    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-kube-api-access-w2cwm" (OuterVolumeSpecName: "kube-api-access-w2cwm") pod "c57550e3-ccb2-47ac-bd84-1ed2f7eef985" (UID: "c57550e3-ccb2-47ac-bd84-1ed2f7eef985"). InnerVolumeSpecName "kube-api-access-w2cwm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.064528    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-kube-api-access-kprdd" (OuterVolumeSpecName: "kube-api-access-kprdd") pod "ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398" (UID: "ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398"). InnerVolumeSpecName "kube-api-access-kprdd". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.163162 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4vws\" (UniqueName: \"kubernetes.io/projected/33401460-35b1-40fd-8bf4-3b0e3d7cba89-kube-api-access-r4vws\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.163214 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33401460-35b1-40fd-8bf4-3b0e3d7cba89-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.163235 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kprdd\" (UniqueName: \"kubernetes.io/projected/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-kube-api-access-kprdd\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.163253 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.163272 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.163290 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2cwm\" (UniqueName: \"kubernetes.io/projected/c57550e3-ccb2-47ac-bd84-1ed2f7eef985-kube-api-access-w2cwm\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.796665 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8cjjt" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.798652 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pbkv7" event={"ID":"e4698227-1751-478c-8996-63502f8c74da","Type":"ContainerStarted","Data":"81ac5b770eaff3bd3b3ef49f81e9f0abea588fcd645008cb799320f83bd47a69"} Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.798749 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8s242" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.799411 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ce5c-account-create-6bcw4" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.800492 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-400b-account-create-cb447" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.801434 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-tfxz8" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.801549 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-b2cd-account-create-vmd78" Nov 24 17:19:55 crc kubenswrapper[4760]: I1124 17:19:55.835123 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-pbkv7" podStartSLOduration=2.982499316 podStartE2EDuration="7.835101698s" podCreationTimestamp="2025-11-24 17:19:48 +0000 UTC" firstStartedPulling="2025-11-24 17:19:49.862605144 +0000 UTC m=+985.185486684" lastFinishedPulling="2025-11-24 17:19:54.715207516 +0000 UTC m=+990.038089066" observedRunningTime="2025-11-24 17:19:55.815828996 +0000 UTC m=+991.138710626" watchObservedRunningTime="2025-11-24 17:19:55.835101698 +0000 UTC m=+991.157983268" Nov 24 17:19:57 crc kubenswrapper[4760]: I1124 17:19:57.817245 4760 generic.go:334] "Generic (PLEG): container finished" podID="e4698227-1751-478c-8996-63502f8c74da" containerID="81ac5b770eaff3bd3b3ef49f81e9f0abea588fcd645008cb799320f83bd47a69" exitCode=0 Nov 24 17:19:57 crc kubenswrapper[4760]: I1124 17:19:57.817321 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pbkv7" event={"ID":"e4698227-1751-478c-8996-63502f8c74da","Type":"ContainerDied","Data":"81ac5b770eaff3bd3b3ef49f81e9f0abea588fcd645008cb799320f83bd47a69"} Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.322679 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.410480 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-tqdfj"] Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.410819 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-tqdfj" podUID="c9090ecc-9df5-4a09-8360-9d11fa34833f" containerName="dnsmasq-dns" containerID="cri-o://f8fa40b17e898905ec83c516573a21e4707a3508f1c55da17dccf91f0952b4d0" gracePeriod=10 Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.827133 4760 generic.go:334] "Generic (PLEG): container finished" podID="c9090ecc-9df5-4a09-8360-9d11fa34833f" containerID="f8fa40b17e898905ec83c516573a21e4707a3508f1c55da17dccf91f0952b4d0" exitCode=0 Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.827504 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tqdfj" event={"ID":"c9090ecc-9df5-4a09-8360-9d11fa34833f","Type":"ContainerDied","Data":"f8fa40b17e898905ec83c516573a21e4707a3508f1c55da17dccf91f0952b4d0"} Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.827529 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tqdfj" event={"ID":"c9090ecc-9df5-4a09-8360-9d11fa34833f","Type":"ContainerDied","Data":"677dcc3df821bfa370c77abb09b1025cf5eaa94b54ecb78d02843352d6149a54"} Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.827540 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="677dcc3df821bfa370c77abb09b1025cf5eaa94b54ecb78d02843352d6149a54" Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.867626 4760 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.941947    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-nb\") pod \"c9090ecc-9df5-4a09-8360-9d11fa34833f\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") "
Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.942067    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-dns-svc\") pod \"c9090ecc-9df5-4a09-8360-9d11fa34833f\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") "
Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.942176    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4v7\" (UniqueName: \"kubernetes.io/projected/c9090ecc-9df5-4a09-8360-9d11fa34833f-kube-api-access-7c4v7\") pod \"c9090ecc-9df5-4a09-8360-9d11fa34833f\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") "
Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.942209    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-sb\") pod \"c9090ecc-9df5-4a09-8360-9d11fa34833f\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") "
Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.942237    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-config\") pod \"c9090ecc-9df5-4a09-8360-9d11fa34833f\" (UID: \"c9090ecc-9df5-4a09-8360-9d11fa34833f\") "
Nov 24 17:19:58 crc kubenswrapper[4760]: I1124 17:19:58.950667    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9090ecc-9df5-4a09-8360-9d11fa34833f-kube-api-access-7c4v7" (OuterVolumeSpecName: "kube-api-access-7c4v7") pod "c9090ecc-9df5-4a09-8360-9d11fa34833f" (UID: "c9090ecc-9df5-4a09-8360-9d11fa34833f"). InnerVolumeSpecName "kube-api-access-7c4v7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.011922    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c9090ecc-9df5-4a09-8360-9d11fa34833f" (UID: "c9090ecc-9df5-4a09-8360-9d11fa34833f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.014636    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c9090ecc-9df5-4a09-8360-9d11fa34833f" (UID: "c9090ecc-9df5-4a09-8360-9d11fa34833f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.014733    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-config" (OuterVolumeSpecName: "config") pod "c9090ecc-9df5-4a09-8360-9d11fa34833f" (UID: "c9090ecc-9df5-4a09-8360-9d11fa34833f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.022167    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c9090ecc-9df5-4a09-8360-9d11fa34833f" (UID: "c9090ecc-9df5-4a09-8360-9d11fa34833f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.045020    4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.045050    4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.045064    4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4v7\" (UniqueName: \"kubernetes.io/projected/c9090ecc-9df5-4a09-8360-9d11fa34833f-kube-api-access-7c4v7\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.045075    4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.045084    4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9090ecc-9df5-4a09-8360-9d11fa34833f-config\") on node \"crc\" DevicePath \"\""
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.079796    4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pbkv7"
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.146183    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-combined-ca-bundle\") pod \"e4698227-1751-478c-8996-63502f8c74da\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") "
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.146378    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vl58\" (UniqueName: \"kubernetes.io/projected/e4698227-1751-478c-8996-63502f8c74da-kube-api-access-8vl58\") pod \"e4698227-1751-478c-8996-63502f8c74da\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") "
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.146440    4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-config-data\") pod \"e4698227-1751-478c-8996-63502f8c74da\" (UID: \"e4698227-1751-478c-8996-63502f8c74da\") "
Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.153211    4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4698227-1751-478c-8996-63502f8c74da-kube-api-access-8vl58" (OuterVolumeSpecName: "kube-api-access-8vl58") pod "e4698227-1751-478c-8996-63502f8c74da" (UID: "e4698227-1751-478c-8996-63502f8c74da"). InnerVolumeSpecName "kube-api-access-8vl58". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.170228 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4698227-1751-478c-8996-63502f8c74da" (UID: "e4698227-1751-478c-8996-63502f8c74da"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.205028 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-config-data" (OuterVolumeSpecName: "config-data") pod "e4698227-1751-478c-8996-63502f8c74da" (UID: "e4698227-1751-478c-8996-63502f8c74da"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.248541 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vl58\" (UniqueName: \"kubernetes.io/projected/e4698227-1751-478c-8996-63502f8c74da-kube-api-access-8vl58\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.248803 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.248895 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4698227-1751-478c-8996-63502f8c74da-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.837769 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pbkv7" Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.837770 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-tqdfj" Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.837762 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pbkv7" event={"ID":"e4698227-1751-478c-8996-63502f8c74da","Type":"ContainerDied","Data":"19084a6ab15832fc5df71ce7c4c5cb35be0e2d6d1baf4e2d666a94214e217305"} Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.839277 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19084a6ab15832fc5df71ce7c4c5cb35be0e2d6d1baf4e2d666a94214e217305" Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.877924 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-tqdfj"] Nov 24 17:19:59 crc kubenswrapper[4760]: I1124 17:19:59.900887 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-tqdfj"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.114598 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-n56w9"] Nov 24 17:20:00 crc kubenswrapper[4760]: E1124 17:20:00.114966 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9090ecc-9df5-4a09-8360-9d11fa34833f" containerName="dnsmasq-dns" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.114986 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9090ecc-9df5-4a09-8360-9d11fa34833f" containerName="dnsmasq-dns" Nov 24 17:20:00 crc kubenswrapper[4760]: E1124 17:20:00.115020 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4698227-1751-478c-8996-63502f8c74da" containerName="keystone-db-sync" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115028 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4698227-1751-478c-8996-63502f8c74da" containerName="keystone-db-sync" Nov 24 17:20:00 crc kubenswrapper[4760]: E1124 17:20:00.115050 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57550e3-ccb2-47ac-bd84-1ed2f7eef985" containerName="mariadb-account-create" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115059 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57550e3-ccb2-47ac-bd84-1ed2f7eef985" containerName="mariadb-account-create" Nov 24 17:20:00 crc kubenswrapper[4760]: E1124 17:20:00.115067 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9090ecc-9df5-4a09-8360-9d11fa34833f" containerName="init" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115074 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9090ecc-9df5-4a09-8360-9d11fa34833f" containerName="init" Nov 24 17:20:00 crc kubenswrapper[4760]: E1124 17:20:00.115089 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33401460-35b1-40fd-8bf4-3b0e3d7cba89" containerName="mariadb-account-create" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115096 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="33401460-35b1-40fd-8bf4-3b0e3d7cba89" containerName="mariadb-account-create" Nov 24 17:20:00 crc kubenswrapper[4760]: E1124 17:20:00.115118 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f07cffba-dc3b-495a-a378-135d2b830d7a" containerName="mariadb-database-create" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115126 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="f07cffba-dc3b-495a-a378-135d2b830d7a" containerName="mariadb-database-create" Nov 24 17:20:00 crc kubenswrapper[4760]: E1124 17:20:00.115138 4760 
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115145    4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398" containerName="mariadb-account-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: E1124 17:20:00.115161    4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="695177f8-fcf1-4cdd-8d7a-b6ae266fe224" containerName="mariadb-database-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115171    4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="695177f8-fcf1-4cdd-8d7a-b6ae266fe224" containerName="mariadb-database-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: E1124 17:20:00.115193    4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e76c2370-1f1c-4336-a95b-c5d11492ebe6" containerName="mariadb-database-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115202    4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="e76c2370-1f1c-4336-a95b-c5d11492ebe6" containerName="mariadb-database-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115395    4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9090ecc-9df5-4a09-8360-9d11fa34833f" containerName="dnsmasq-dns"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115425    4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="e76c2370-1f1c-4336-a95b-c5d11492ebe6" containerName="mariadb-database-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115445    4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57550e3-ccb2-47ac-bd84-1ed2f7eef985" containerName="mariadb-account-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115463    4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="695177f8-fcf1-4cdd-8d7a-b6ae266fe224" containerName="mariadb-database-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115477    4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4698227-1751-478c-8996-63502f8c74da" containerName="keystone-db-sync"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115499    4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="33401460-35b1-40fd-8bf4-3b0e3d7cba89" containerName="mariadb-account-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115517    4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="f07cffba-dc3b-495a-a378-135d2b830d7a" containerName="mariadb-database-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.115532    4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398" containerName="mariadb-account-create"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.119472    4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-n56w9"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.168091    4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-kltrw"]
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.170486    4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-kltrw"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.174840    4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.175024    4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.175130    4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.175032    4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.181731    4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t62xm"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.194550    4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-n56w9"]
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.209169    4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-kltrw"]
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.268683    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-config\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.268736    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.268786    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-svc\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.268822    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.268855    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.268885    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5krc\" (UniqueName: \"kubernetes.io/projected/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-kube-api-access-v5krc\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9"
\"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.303394 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5db6ccb479-g865g"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.304663 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.307177 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.308851 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.308978 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-rhmkc" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.309122 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.323330 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5db6ccb479-g865g"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.354505 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-gszfs"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.361385 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.364751 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.364964 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-tk7rw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.365208 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.368815 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-gszfs"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.372784 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-credential-keys\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.372828 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-svc\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.372869 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.372901 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-config-data\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.372924 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.372942 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-scripts\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.372958 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4z2c\" (UniqueName: \"kubernetes.io/projected/7fe4a46e-79cc-4a38-b108-aa1069ddf998-kube-api-access-j4z2c\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.372976 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-fernet-keys\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.372991 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-combined-ca-bundle\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.373035 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5krc\" (UniqueName: \"kubernetes.io/projected/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-kube-api-access-v5krc\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.373057 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-config\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.373083 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.374461 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.378421 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-svc\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.378979 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.379399 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.379820 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-config\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.421906 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5krc\" (UniqueName: \"kubernetes.io/projected/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-kube-api-access-v5krc\") pod \"dnsmasq-dns-847c4cc679-n56w9\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.436344 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.444530 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.448103 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.450558 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.450858 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.453321 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.473107 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-8x9fd"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474103 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-config-data\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474129 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/136a5219-09ae-4a0c-a3d6-1007b0818546-horizon-secret-key\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474161 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-config-data\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474184 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-config-data\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474200 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-scripts\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474220 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-combined-ca-bundle\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474236 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-scripts\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474250 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-scripts\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474268 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4z2c\" (UniqueName: \"kubernetes.io/projected/7fe4a46e-79cc-4a38-b108-aa1069ddf998-kube-api-access-j4z2c\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474286 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-fernet-keys\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474301 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-combined-ca-bundle\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474338 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/182b9849-0723-4fa8-bade-df2f05e6cf37-etc-machine-id\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474359 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-db-sync-config-data\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474372 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/136a5219-09ae-4a0c-a3d6-1007b0818546-logs\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474393 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k4kh\" (UniqueName: \"kubernetes.io/projected/182b9849-0723-4fa8-bade-df2f05e6cf37-kube-api-access-8k4kh\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474428 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9wc5\" (UniqueName: \"kubernetes.io/projected/136a5219-09ae-4a0c-a3d6-1007b0818546-kube-api-access-c9wc5\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.474454 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-credential-keys\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.476245 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-8x9fd" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.488053 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-8x9fd"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.488587 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-credential-keys\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.490422 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-fernet-keys\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.496254 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-swvgc" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.496468 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.496580 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.510521 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-bkqxk"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.510917 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-config-data\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.513481 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-combined-ca-bundle\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.515477 4760 util.go:30] "No sandbox for pod can be found. 
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.518282    4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.521733    4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-scripts\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.521802    4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-bkqxk"]
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.524694    4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-cx5qk"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.525148    4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.542429    4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4z2c\" (UniqueName: \"kubernetes.io/projected/7fe4a46e-79cc-4a38-b108-aa1069ddf998-kube-api-access-j4z2c\") pod \"keystone-bootstrap-kltrw\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") " pod="openstack/keystone-bootstrap-kltrw"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.575781    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-config-data\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.575830    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-scripts\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.575863    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-combined-ca-bundle\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.575882    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-scripts\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.575954    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-log-httpd\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.575986    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/182b9849-0723-4fa8-bade-df2f05e6cf37-etc-machine-id\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576027    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-config-data\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576048    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-db-sync-config-data\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576070    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/136a5219-09ae-4a0c-a3d6-1007b0818546-logs\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576110    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k4kh\" (UniqueName: \"kubernetes.io/projected/182b9849-0723-4fa8-bade-df2f05e6cf37-kube-api-access-8k4kh\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576141    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9wc5\" (UniqueName: \"kubernetes.io/projected/136a5219-09ae-4a0c-a3d6-1007b0818546-kube-api-access-c9wc5\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576187    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpnjs\" (UniqueName: \"kubernetes.io/projected/36365905-cfb1-42e4-8e94-c586e1835c60-kube-api-access-hpnjs\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576231    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576293    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-config\") pod \"neutron-db-sync-8x9fd\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") " pod="openstack/neutron-db-sync-8x9fd"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576342    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576367    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjwg7\" (UniqueName: \"kubernetes.io/projected/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-kube-api-access-cjwg7\") pod \"neutron-db-sync-8x9fd\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") " pod="openstack/neutron-db-sync-8x9fd"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576382    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-config-data\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576396    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-run-httpd\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576435    4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/136a5219-09ae-4a0c-a3d6-1007b0818546-horizon-secret-key\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576452    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-scripts\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.576505    4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-combined-ca-bundle\") pod \"neutron-db-sync-8x9fd\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") " pod="openstack/neutron-db-sync-8x9fd"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.580401    4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-scripts\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.585211    4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-config-data\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.593138    4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/136a5219-09ae-4a0c-a3d6-1007b0818546-logs\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g"
Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.593818    4760 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/182b9849-0723-4fa8-bade-df2f05e6cf37-etc-machine-id\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.594174 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-config-data\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.594485 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-db-sync-config-data\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.597501 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-combined-ca-bundle\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.603266 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-scripts\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.617886 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/136a5219-09ae-4a0c-a3d6-1007b0818546-horizon-secret-key\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.634537 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k4kh\" (UniqueName: \"kubernetes.io/projected/182b9849-0723-4fa8-bade-df2f05e6cf37-kube-api-access-8k4kh\") pod \"cinder-db-sync-gszfs\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") " pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.637068 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-z6w2f"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.637412 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9wc5\" (UniqueName: \"kubernetes.io/projected/136a5219-09ae-4a0c-a3d6-1007b0818546-kube-api-access-c9wc5\") pod \"horizon-5db6ccb479-g865g\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") " pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.638108 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.653774 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.655420 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-4hdsn" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679131 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679162 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjwg7\" (UniqueName: \"kubernetes.io/projected/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-kube-api-access-cjwg7\") pod \"neutron-db-sync-8x9fd\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") " pod="openstack/neutron-db-sync-8x9fd" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679178 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-run-httpd\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679196 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-scripts\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679220 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-combined-ca-bundle\") pod \"neutron-db-sync-8x9fd\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") " pod="openstack/neutron-db-sync-8x9fd" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679271 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-config-data\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679288 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-log-httpd\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679317 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-config-data\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679338 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-combined-ca-bundle\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679371 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-logs\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679388 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpnjs\" (UniqueName: \"kubernetes.io/projected/36365905-cfb1-42e4-8e94-c586e1835c60-kube-api-access-hpnjs\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679409 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679428 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-scripts\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679449 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-config\") pod \"neutron-db-sync-8x9fd\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") " pod="openstack/neutron-db-sync-8x9fd" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.679467 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxppc\" (UniqueName: \"kubernetes.io/projected/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-kube-api-access-hxppc\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.692705 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-run-httpd\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.693320 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-log-httpd\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.693823 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-gszfs" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.695720 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-combined-ca-bundle\") pod \"neutron-db-sync-8x9fd\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") " pod="openstack/neutron-db-sync-8x9fd" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.702094 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-config-data\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.702457 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-config\") pod \"neutron-db-sync-8x9fd\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") " pod="openstack/neutron-db-sync-8x9fd" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.702552 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-scripts\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.705696 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.706055 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.725603 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpnjs\" (UniqueName: \"kubernetes.io/projected/36365905-cfb1-42e4-8e94-c586e1835c60-kube-api-access-hpnjs\") pod \"ceilometer-0\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " pod="openstack/ceilometer-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.726587 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjwg7\" (UniqueName: \"kubernetes.io/projected/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-kube-api-access-cjwg7\") pod \"neutron-db-sync-8x9fd\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") " pod="openstack/neutron-db-sync-8x9fd" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.728380 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-n56w9"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.734513 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-8x9fd" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.742156 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.743502 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.767735 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.786712 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.787657 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2bglw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.787850 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.789369 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-combined-ca-bundle\") pod \"barbican-db-sync-z6w2f\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.789425 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4qtf\" (UniqueName: \"kubernetes.io/projected/195eb4e3-2851-4742-ba6a-48f56b7ac231-kube-api-access-c4qtf\") pod \"barbican-db-sync-z6w2f\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.789463 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-db-sync-config-data\") pod \"barbican-db-sync-z6w2f\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.789489 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-config-data\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.789540 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-combined-ca-bundle\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.789577 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-logs\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.789614 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-scripts\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.789637 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-hxppc\" (UniqueName: \"kubernetes.io/projected/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-kube-api-access-hxppc\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.791623 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-logs\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.795844 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-z6w2f"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.803804 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-kltrw" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.806112 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-combined-ca-bundle\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.808836 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-85696549bf-k4b6z"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.811354 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.824864 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.828436 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-scripts\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.834448 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-config-data\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.835144 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxppc\" (UniqueName: \"kubernetes.io/projected/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-kube-api-access-hxppc\") pod \"placement-db-sync-bkqxk\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.873926 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85696549bf-k4b6z"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.889091 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lkmt5"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.892155 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.898785 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-logs\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.898844 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-combined-ca-bundle\") pod \"barbican-db-sync-z6w2f\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.898866 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.898902 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-scripts\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.898924 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4qtf\" (UniqueName: \"kubernetes.io/projected/195eb4e3-2851-4742-ba6a-48f56b7ac231-kube-api-access-c4qtf\") pod \"barbican-db-sync-z6w2f\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.898939 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e2b0ea2-3b39-49a5-b399-2d000325a743-logs\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.898957 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lrsl\" (UniqueName: \"kubernetes.io/projected/9e2b0ea2-3b39-49a5-b399-2d000325a743-kube-api-access-7lrsl\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.898986 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-db-sync-config-data\") pod \"barbican-db-sync-z6w2f\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.899015 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9e2b0ea2-3b39-49a5-b399-2d000325a743-horizon-secret-key\") pod \"horizon-85696549bf-k4b6z\" (UID: 
\"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.899041 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.899084 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-config-data\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.899107 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.899123 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.899151 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-config-data\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.899185 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-scripts\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.899207 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndzff\" (UniqueName: \"kubernetes.io/projected/c5dafeef-ad07-4a83-97e8-ab8ae557a002-kube-api-access-ndzff\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.910523 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-combined-ca-bundle\") pod \"barbican-db-sync-z6w2f\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.916294 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-db-sync-config-data\") pod \"barbican-db-sync-z6w2f\" (UID: 
\"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.920651 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.921947 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.923596 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.926623 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.926794 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.938485 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lkmt5"] Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.945602 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4qtf\" (UniqueName: \"kubernetes.io/projected/195eb4e3-2851-4742-ba6a-48f56b7ac231-kube-api-access-c4qtf\") pod \"barbican-db-sync-z6w2f\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:00 crc kubenswrapper[4760]: I1124 17:20:00.945656 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002386 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002439 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9q7c\" (UniqueName: \"kubernetes.io/projected/c84af16b-fab3-4d5c-bb27-4e04ad255e74-kube-api-access-t9q7c\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002461 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002483 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-config-data\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002512 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-public-tls-certs\") pod 
\"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002529 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002557 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-config-data\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002583 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-scripts\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002602 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-config\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002620 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndzff\" (UniqueName: \"kubernetes.io/projected/c5dafeef-ad07-4a83-97e8-ab8ae557a002-kube-api-access-ndzff\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002636 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-logs\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002655 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002671 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002703 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: 
\"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002731 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002748 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002769 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-scripts\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002785 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002804 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-logs\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002819 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002846 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e2b0ea2-3b39-49a5-b399-2d000325a743-logs\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002859 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002877 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lrsl\" (UniqueName: \"kubernetes.io/projected/9e2b0ea2-3b39-49a5-b399-2d000325a743-kube-api-access-7lrsl\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") 
" pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002915 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9e2b0ea2-3b39-49a5-b399-2d000325a743-horizon-secret-key\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002938 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002953 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.002970 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q55qw\" (UniqueName: \"kubernetes.io/projected/f91e8f93-94f8-427b-85cc-aaa3b930f03c-kube-api-access-q55qw\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.006547 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e2b0ea2-3b39-49a5-b399-2d000325a743-logs\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.007521 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.007647 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-logs\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.008328 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-config-data\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.008697 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 
17:20:01.009131 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-scripts\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.009730 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.009925 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-scripts\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.010052 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-config-data\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.010237 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9e2b0ea2-3b39-49a5-b399-2d000325a743-horizon-secret-key\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.014994 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.022637 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.033562 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lrsl\" (UniqueName: \"kubernetes.io/projected/9e2b0ea2-3b39-49a5-b399-2d000325a743-kube-api-access-7lrsl\") pod \"horizon-85696549bf-k4b6z\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") " pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.037470 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndzff\" (UniqueName: \"kubernetes.io/projected/c5dafeef-ad07-4a83-97e8-ab8ae557a002-kube-api-access-ndzff\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.055629 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 
17:20:01.104782 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.104991 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-logs\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.105108 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.105193 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.105294 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.105378 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q55qw\" (UniqueName: \"kubernetes.io/projected/f91e8f93-94f8-427b-85cc-aaa3b930f03c-kube-api-access-q55qw\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.105471 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.105558 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.105636 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9q7c\" (UniqueName: \"kubernetes.io/projected/c84af16b-fab3-4d5c-bb27-4e04ad255e74-kube-api-access-t9q7c\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.105817 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-config\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.105903 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.105978 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.106110 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.106233 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.106455 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-logs\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.106739 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-config\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.106166 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.107120 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.107172 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod 
\"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.107484 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.110409 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.112260 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.112896 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.120265 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.122097 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.122469 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.124305 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9q7c\" (UniqueName: \"kubernetes.io/projected/c84af16b-fab3-4d5c-bb27-4e04ad255e74-kube-api-access-t9q7c\") pod \"dnsmasq-dns-785d8bcb8c-lkmt5\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.128665 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q55qw\" (UniqueName: \"kubernetes.io/projected/f91e8f93-94f8-427b-85cc-aaa3b930f03c-kube-api-access-q55qw\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.131443 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.143668 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.156268 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.157597 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.175111 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.186589 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-n56w9"] Nov 24 17:20:01 crc kubenswrapper[4760]: W1124 17:20:01.221975 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4d98020_5f63_49ec_b4b1_cf27c17eb5a3.slice/crio-06830c253bc1431684ac7c2e19cf11a26cd3f31d3277af81063beeae7aeff36e WatchSource:0}: Error finding container 06830c253bc1431684ac7c2e19cf11a26cd3f31d3277af81063beeae7aeff36e: Status 404 returned error can't find the container with id 06830c253bc1431684ac7c2e19cf11a26cd3f31d3277af81063beeae7aeff36e Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.230352 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.295291 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.355928 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-gszfs"] Nov 24 17:20:01 crc kubenswrapper[4760]: W1124 17:20:01.380767 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod182b9849_0723_4fa8_bade_df2f05e6cf37.slice/crio-9b0d861e29840b082eb3125438a7885a327ca8618252eb4c5371a8ceac23db67 WatchSource:0}: Error finding container 9b0d861e29840b082eb3125438a7885a327ca8618252eb4c5371a8ceac23db67: Status 404 returned error can't find the container with id 9b0d861e29840b082eb3125438a7885a327ca8618252eb4c5371a8ceac23db67 Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.391947 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-8x9fd"] Nov 24 17:20:01 crc kubenswrapper[4760]: W1124 17:20:01.406320 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1080c24b_f48e_4150_b4cb_c0b1bf1081e3.slice/crio-2a91c4d134ac440bf47bd4ee140432daead502cf95a883a72de2fb66d1069754 WatchSource:0}: Error finding container 2a91c4d134ac440bf47bd4ee140432daead502cf95a883a72de2fb66d1069754: Status 404 returned error can't find the container with id 2a91c4d134ac440bf47bd4ee140432daead502cf95a883a72de2fb66d1069754 Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.486142 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9090ecc-9df5-4a09-8360-9d11fa34833f" path="/var/lib/kubelet/pods/c9090ecc-9df5-4a09-8360-9d11fa34833f/volumes" Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.566073 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5db6ccb479-g865g"] Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.592368 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-kltrw"] Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.705164 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:20:01 crc kubenswrapper[4760]: I1124 17:20:01.830351 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-bkqxk"] Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:01.911339 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"36365905-cfb1-42e4-8e94-c586e1835c60","Type":"ContainerStarted","Data":"4bfc7bd14af24f301fcf430a0cf25a81efc000463d33434053048a4c11459a02"} Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:01.915951 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5db6ccb479-g865g" event={"ID":"136a5219-09ae-4a0c-a3d6-1007b0818546","Type":"ContainerStarted","Data":"de24f54e3dd5dc8eab2fe760a218a8bf6e1dbbba4b4d6f7d3feb799676184bcd"} Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:01.918139 4760 generic.go:334] "Generic (PLEG): container finished" podID="f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" containerID="505d0744be9b0931ee4c84498a60083d346d16449ef4acfec79b423681217f17" exitCode=0 Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:01.918340 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-n56w9" event={"ID":"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3","Type":"ContainerDied","Data":"505d0744be9b0931ee4c84498a60083d346d16449ef4acfec79b423681217f17"} Nov 24 17:20:02 crc 
kubenswrapper[4760]: I1124 17:20:01.918367 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-n56w9" event={"ID":"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3","Type":"ContainerStarted","Data":"06830c253bc1431684ac7c2e19cf11a26cd3f31d3277af81063beeae7aeff36e"} Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:01.924625 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8x9fd" event={"ID":"1080c24b-f48e-4150-b4cb-c0b1bf1081e3","Type":"ContainerStarted","Data":"2a91c4d134ac440bf47bd4ee140432daead502cf95a883a72de2fb66d1069754"} Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:01.927872 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-gszfs" event={"ID":"182b9849-0723-4fa8-bade-df2f05e6cf37","Type":"ContainerStarted","Data":"9b0d861e29840b082eb3125438a7885a327ca8618252eb4c5371a8ceac23db67"} Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:01.931637 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-bkqxk" event={"ID":"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7","Type":"ContainerStarted","Data":"fa06ad9eddff5deba2fb20bf0d248765bc4257c954b21c6fde642608a19d3b8c"} Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:01.967210 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kltrw" event={"ID":"7fe4a46e-79cc-4a38-b108-aa1069ddf998","Type":"ContainerStarted","Data":"64b19e39b41779da4f745e81dc082316639c9c425ce6ce88c8a04002ebcc5d5b"} Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:01.997621 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-z6w2f"] Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.004747 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:20:02 crc kubenswrapper[4760]: W1124 17:20:02.016063 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5dafeef_ad07_4a83_97e8_ab8ae557a002.slice/crio-d28a38739015ac473eebbd64eb384ab5881a3d6eb142df9972c1d73ce70a8299 WatchSource:0}: Error finding container d28a38739015ac473eebbd64eb384ab5881a3d6eb142df9972c1d73ce70a8299: Status 404 returned error can't find the container with id d28a38739015ac473eebbd64eb384ab5881a3d6eb142df9972c1d73ce70a8299 Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.063506 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lkmt5"] Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.089869 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85696549bf-k4b6z"] Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.137747 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:20:02 crc kubenswrapper[4760]: W1124 17:20:02.185154 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf91e8f93_94f8_427b_85cc_aaa3b930f03c.slice/crio-9b309b6506b54ebf876dbbf83562f65de15ce06ff8e98d9b52f8c028fc6815a0 WatchSource:0}: Error finding container 9b309b6506b54ebf876dbbf83562f65de15ce06ff8e98d9b52f8c028fc6815a0: Status 404 returned error can't find the container with id 9b309b6506b54ebf876dbbf83562f65de15ce06ff8e98d9b52f8c028fc6815a0 Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.632169 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/glance-default-external-api-0"] Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.690649 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5db6ccb479-g865g"] Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.724148 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5b5747d5f-59ddv"] Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.725558 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.744460 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5b5747d5f-59ddv"] Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.757557 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.843806 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-horizon-secret-key\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.843847 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-config-data\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.843926 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-scripts\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.845044 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-logs\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.845171 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds9jc\" (UniqueName: \"kubernetes.io/projected/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-kube-api-access-ds9jc\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.944434 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.947832 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-horizon-secret-key\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.948923 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-config-data\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.949085 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-scripts\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.949175 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-logs\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.949198 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds9jc\" (UniqueName: \"kubernetes.io/projected/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-kube-api-access-ds9jc\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.949553 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-config-data\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.950288 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-scripts\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.950532 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-logs\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:02 crc kubenswrapper[4760]: I1124 17:20:02.969198 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-horizon-secret-key\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.011608 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds9jc\" (UniqueName: \"kubernetes.io/projected/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-kube-api-access-ds9jc\") pod \"horizon-5b5747d5f-59ddv\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") " pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.015638 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kltrw" event={"ID":"7fe4a46e-79cc-4a38-b108-aa1069ddf998","Type":"ContainerStarted","Data":"390e1a2dfd64e49424059bac17bb0c826f2e248e2b831d5dafeb3975570fffb2"} Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.037505 4760 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-kltrw" podStartSLOduration=3.037485844 podStartE2EDuration="3.037485844s" podCreationTimestamp="2025-11-24 17:20:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:03.032337776 +0000 UTC m=+998.355219326" watchObservedRunningTime="2025-11-24 17:20:03.037485844 +0000 UTC m=+998.360367394" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.075880 4760 generic.go:334] "Generic (PLEG): container finished" podID="c84af16b-fab3-4d5c-bb27-4e04ad255e74" containerID="c170a41a6f262115b60a746628ff265c080282afe870a00c486fa361f52f6a7e" exitCode=0 Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.075951 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" event={"ID":"c84af16b-fab3-4d5c-bb27-4e04ad255e74","Type":"ContainerDied","Data":"c170a41a6f262115b60a746628ff265c080282afe870a00c486fa361f52f6a7e"} Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.075982 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" event={"ID":"c84af16b-fab3-4d5c-bb27-4e04ad255e74","Type":"ContainerStarted","Data":"d93eac488f3d17072bca7b7871c82c2f674559c93c7e47be31dcf4c11e919d4c"} Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.087211 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8x9fd" event={"ID":"1080c24b-f48e-4150-b4cb-c0b1bf1081e3","Type":"ContainerStarted","Data":"77b1a9c3f941b58bd531ae2d0729aad1e639c502dfae3b6abb37ab3aaf918921"} Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.090401 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85696549bf-k4b6z" event={"ID":"9e2b0ea2-3b39-49a5-b399-2d000325a743","Type":"ContainerStarted","Data":"03199d6402cf4771451621f824be0c62cd0f79978bc85a8880a56f866233d711"} Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.111289 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f91e8f93-94f8-427b-85cc-aaa3b930f03c","Type":"ContainerStarted","Data":"9b309b6506b54ebf876dbbf83562f65de15ce06ff8e98d9b52f8c028fc6815a0"} Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.118306 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.121238 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c5dafeef-ad07-4a83-97e8-ab8ae557a002","Type":"ContainerStarted","Data":"d28a38739015ac473eebbd64eb384ab5881a3d6eb142df9972c1d73ce70a8299"} Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.123159 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z6w2f" event={"ID":"195eb4e3-2851-4742-ba6a-48f56b7ac231","Type":"ContainerStarted","Data":"f7cd11c486a99c41573a3c9a9b7b2c3c812735de872429e4031868b1c2746d8d"} Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.124667 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-8x9fd" podStartSLOduration=3.12464734 podStartE2EDuration="3.12464734s" podCreationTimestamp="2025-11-24 17:20:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:03.116291171 +0000 UTC m=+998.439172721" watchObservedRunningTime="2025-11-24 17:20:03.12464734 +0000 UTC m=+998.447528900" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.204312 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.367282 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-nb\") pod \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.367685 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5krc\" (UniqueName: \"kubernetes.io/projected/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-kube-api-access-v5krc\") pod \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.367724 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-config\") pod \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.367798 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-swift-storage-0\") pod \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.367889 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-svc\") pod \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") " Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.367922 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-sb\") pod \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\" (UID: \"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3\") 
" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.392393 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-kube-api-access-v5krc" (OuterVolumeSpecName: "kube-api-access-v5krc") pod "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" (UID: "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3"). InnerVolumeSpecName "kube-api-access-v5krc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.479164 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5krc\" (UniqueName: \"kubernetes.io/projected/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-kube-api-access-v5krc\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.568076 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" (UID: "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.581093 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.586724 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" (UID: "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.598834 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" (UID: "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.608340 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" (UID: "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.609300 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-config" (OuterVolumeSpecName: "config") pod "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" (UID: "f4d98020-5f63-49ec-b4b1-cf27c17eb5a3"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.683063 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.683091 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.683101 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.683110 4760 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:03 crc kubenswrapper[4760]: I1124 17:20:03.814696 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5b5747d5f-59ddv"] Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.162318 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f91e8f93-94f8-427b-85cc-aaa3b930f03c","Type":"ContainerStarted","Data":"cbb1a88197730d0367a94b4e8fe999cc8633a676d929854a98d516da4e061991"} Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.169953 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c5dafeef-ad07-4a83-97e8-ab8ae557a002","Type":"ContainerStarted","Data":"74c0ce70605e9d81beef6156ca21dce27986514785a5a84e9219d0201ee651d4"} Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.173354 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" event={"ID":"c84af16b-fab3-4d5c-bb27-4e04ad255e74","Type":"ContainerStarted","Data":"32827c9c51625aac00a4aa0dd129e4a22aa8eb971254c98717bc20267e5fdc7a"} Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.173400 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.175745 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-n56w9" event={"ID":"f4d98020-5f63-49ec-b4b1-cf27c17eb5a3","Type":"ContainerDied","Data":"06830c253bc1431684ac7c2e19cf11a26cd3f31d3277af81063beeae7aeff36e"} Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.175781 4760 scope.go:117] "RemoveContainer" containerID="505d0744be9b0931ee4c84498a60083d346d16449ef4acfec79b423681217f17" Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.175890 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-n56w9" Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.183979 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b5747d5f-59ddv" event={"ID":"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46","Type":"ContainerStarted","Data":"201ba9a00b37baf8fa6feddafd688163a7541b6ce7938910ed7385049672826e"} Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.205900 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" podStartSLOduration=4.205883884 podStartE2EDuration="4.205883884s" podCreationTimestamp="2025-11-24 17:20:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:04.201526179 +0000 UTC m=+999.524407749" watchObservedRunningTime="2025-11-24 17:20:04.205883884 +0000 UTC m=+999.528765434" Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.271148 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-n56w9"] Nov 24 17:20:04 crc kubenswrapper[4760]: I1124 17:20:04.291570 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-n56w9"] Nov 24 17:20:05 crc kubenswrapper[4760]: I1124 17:20:05.478045 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" path="/var/lib/kubelet/pods/f4d98020-5f63-49ec-b4b1-cf27c17eb5a3/volumes" Nov 24 17:20:05 crc kubenswrapper[4760]: I1124 17:20:05.644448 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:20:05 crc kubenswrapper[4760]: I1124 17:20:05.644523 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:20:05 crc kubenswrapper[4760]: I1124 17:20:05.644576 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:20:05 crc kubenswrapper[4760]: I1124 17:20:05.645377 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dfd774042184cc119075d4a563b6ff781e5839c1eacbc702f706225028bd27c8"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:20:05 crc kubenswrapper[4760]: I1124 17:20:05.645452 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://dfd774042184cc119075d4a563b6ff781e5839c1eacbc702f706225028bd27c8" gracePeriod=600 Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.208573 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"c5dafeef-ad07-4a83-97e8-ab8ae557a002","Type":"ContainerStarted","Data":"b6ee534cf8057eeccdaf62f7717670e80ae4cef7d2f4753d880993816ae073e2"} Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.208915 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" containerName="glance-log" containerID="cri-o://74c0ce70605e9d81beef6156ca21dce27986514785a5a84e9219d0201ee651d4" gracePeriod=30 Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.209049 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" containerName="glance-httpd" containerID="cri-o://b6ee534cf8057eeccdaf62f7717670e80ae4cef7d2f4753d880993816ae073e2" gracePeriod=30 Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.219307 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="dfd774042184cc119075d4a563b6ff781e5839c1eacbc702f706225028bd27c8" exitCode=0 Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.219393 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"dfd774042184cc119075d4a563b6ff781e5839c1eacbc702f706225028bd27c8"} Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.219464 4760 scope.go:117] "RemoveContainer" containerID="82b000a4d02003c883bf71c824299533ea1c6d3009389b2511a2787ceedc0656" Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.227553 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f91e8f93-94f8-427b-85cc-aaa3b930f03c","Type":"ContainerStarted","Data":"d8cc9ef4713673baeac1401c18c3ae281243cf8bd6073ad0a6d78a53971bc3ee"} Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.227726 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" containerName="glance-log" containerID="cri-o://cbb1a88197730d0367a94b4e8fe999cc8633a676d929854a98d516da4e061991" gracePeriod=30 Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.228196 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" containerName="glance-httpd" containerID="cri-o://d8cc9ef4713673baeac1401c18c3ae281243cf8bd6073ad0a6d78a53971bc3ee" gracePeriod=30 Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.236401 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.236378315 podStartE2EDuration="6.236378315s" podCreationTimestamp="2025-11-24 17:20:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:06.225193185 +0000 UTC m=+1001.548074735" watchObservedRunningTime="2025-11-24 17:20:06.236378315 +0000 UTC m=+1001.559259865" Nov 24 17:20:06 crc kubenswrapper[4760]: I1124 17:20:06.259264 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.25923975 podStartE2EDuration="6.25923975s" podCreationTimestamp="2025-11-24 17:20:00 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:06.249525582 +0000 UTC m=+1001.572407132" watchObservedRunningTime="2025-11-24 17:20:06.25923975 +0000 UTC m=+1001.582121300" Nov 24 17:20:07 crc kubenswrapper[4760]: I1124 17:20:07.236788 4760 generic.go:334] "Generic (PLEG): container finished" podID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" containerID="d8cc9ef4713673baeac1401c18c3ae281243cf8bd6073ad0a6d78a53971bc3ee" exitCode=0 Nov 24 17:20:07 crc kubenswrapper[4760]: I1124 17:20:07.237087 4760 generic.go:334] "Generic (PLEG): container finished" podID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" containerID="cbb1a88197730d0367a94b4e8fe999cc8633a676d929854a98d516da4e061991" exitCode=143 Nov 24 17:20:07 crc kubenswrapper[4760]: I1124 17:20:07.236840 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f91e8f93-94f8-427b-85cc-aaa3b930f03c","Type":"ContainerDied","Data":"d8cc9ef4713673baeac1401c18c3ae281243cf8bd6073ad0a6d78a53971bc3ee"} Nov 24 17:20:07 crc kubenswrapper[4760]: I1124 17:20:07.237147 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f91e8f93-94f8-427b-85cc-aaa3b930f03c","Type":"ContainerDied","Data":"cbb1a88197730d0367a94b4e8fe999cc8633a676d929854a98d516da4e061991"} Nov 24 17:20:07 crc kubenswrapper[4760]: I1124 17:20:07.239292 4760 generic.go:334] "Generic (PLEG): container finished" podID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" containerID="b6ee534cf8057eeccdaf62f7717670e80ae4cef7d2f4753d880993816ae073e2" exitCode=0 Nov 24 17:20:07 crc kubenswrapper[4760]: I1124 17:20:07.239309 4760 generic.go:334] "Generic (PLEG): container finished" podID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" containerID="74c0ce70605e9d81beef6156ca21dce27986514785a5a84e9219d0201ee651d4" exitCode=143 Nov 24 17:20:07 crc kubenswrapper[4760]: I1124 17:20:07.239345 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c5dafeef-ad07-4a83-97e8-ab8ae557a002","Type":"ContainerDied","Data":"b6ee534cf8057eeccdaf62f7717670e80ae4cef7d2f4753d880993816ae073e2"} Nov 24 17:20:07 crc kubenswrapper[4760]: I1124 17:20:07.239363 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c5dafeef-ad07-4a83-97e8-ab8ae557a002","Type":"ContainerDied","Data":"74c0ce70605e9d81beef6156ca21dce27986514785a5a84e9219d0201ee651d4"} Nov 24 17:20:07 crc kubenswrapper[4760]: I1124 17:20:07.242125 4760 generic.go:334] "Generic (PLEG): container finished" podID="7fe4a46e-79cc-4a38-b108-aa1069ddf998" containerID="390e1a2dfd64e49424059bac17bb0c826f2e248e2b831d5dafeb3975570fffb2" exitCode=0 Nov 24 17:20:07 crc kubenswrapper[4760]: I1124 17:20:07.242145 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kltrw" event={"ID":"7fe4a46e-79cc-4a38-b108-aa1069ddf998","Type":"ContainerDied","Data":"390e1a2dfd64e49424059bac17bb0c826f2e248e2b831d5dafeb3975570fffb2"} Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.212322 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85696549bf-k4b6z"] Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.256484 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-8565878c68-g58n7"] Nov 24 17:20:09 crc kubenswrapper[4760]: E1124 17:20:09.256893 4760 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" containerName="init" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.256907 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" containerName="init" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.257359 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4d98020-5f63-49ec-b4b1-cf27c17eb5a3" containerName="init" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.260505 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.264807 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.272149 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8565878c68-g58n7"] Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.333063 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5b5747d5f-59ddv"] Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.350064 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-bc766455b-9dfnr"] Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.351411 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.358108 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-bc766455b-9dfnr"] Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.408821 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-combined-ca-bundle\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.408887 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-config-data\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.408955 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-tls-certs\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.409098 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-logs\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.409228 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-secret-key\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " 
pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.409255 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7xhx\" (UniqueName: \"kubernetes.io/projected/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-kube-api-access-g7xhx\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.409496 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-scripts\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.511438 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20fc1526-eb8d-424b-b03a-784154b5d7fa-logs\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.511523 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-secret-key\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.511544 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7xhx\" (UniqueName: \"kubernetes.io/projected/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-kube-api-access-g7xhx\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.511589 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndsrg\" (UniqueName: \"kubernetes.io/projected/20fc1526-eb8d-424b-b03a-784154b5d7fa-kube-api-access-ndsrg\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.511618 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/20fc1526-eb8d-424b-b03a-784154b5d7fa-horizon-secret-key\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.511766 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/20fc1526-eb8d-424b-b03a-784154b5d7fa-horizon-tls-certs\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.511798 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-scripts\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " 
pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.511825 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20fc1526-eb8d-424b-b03a-784154b5d7fa-combined-ca-bundle\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.512162 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-combined-ca-bundle\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.512446 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-tls-certs\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.513598 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20fc1526-eb8d-424b-b03a-784154b5d7fa-scripts\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.513659 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-config-data\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.513701 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/20fc1526-eb8d-424b-b03a-784154b5d7fa-config-data\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.512465 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-scripts\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.514415 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-logs\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.514909 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-logs\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.517882 4760 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-config-data\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.518522 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-combined-ca-bundle\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.519693 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-secret-key\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.520280 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-tls-certs\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.529066 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7xhx\" (UniqueName: \"kubernetes.io/projected/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-kube-api-access-g7xhx\") pod \"horizon-8565878c68-g58n7\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " pod="openstack/horizon-8565878c68-g58n7"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.595682 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8565878c68-g58n7"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.616194 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/20fc1526-eb8d-424b-b03a-784154b5d7fa-horizon-secret-key\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.616259 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/20fc1526-eb8d-424b-b03a-784154b5d7fa-horizon-tls-certs\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.616369 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20fc1526-eb8d-424b-b03a-784154b5d7fa-combined-ca-bundle\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.616455 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20fc1526-eb8d-424b-b03a-784154b5d7fa-scripts\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.616517 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/20fc1526-eb8d-424b-b03a-784154b5d7fa-config-data\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.616597 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20fc1526-eb8d-424b-b03a-784154b5d7fa-logs\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.616713 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndsrg\" (UniqueName: \"kubernetes.io/projected/20fc1526-eb8d-424b-b03a-784154b5d7fa-kube-api-access-ndsrg\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.616969 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20fc1526-eb8d-424b-b03a-784154b5d7fa-logs\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.623120 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/20fc1526-eb8d-424b-b03a-784154b5d7fa-config-data\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.623660 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/20fc1526-eb8d-424b-b03a-784154b5d7fa-horizon-tls-certs\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.623799 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/20fc1526-eb8d-424b-b03a-784154b5d7fa-horizon-secret-key\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.627453 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20fc1526-eb8d-424b-b03a-784154b5d7fa-scripts\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.627480 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20fc1526-eb8d-424b-b03a-784154b5d7fa-combined-ca-bundle\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.641427 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndsrg\" (UniqueName: \"kubernetes.io/projected/20fc1526-eb8d-424b-b03a-784154b5d7fa-kube-api-access-ndsrg\") pod \"horizon-bc766455b-9dfnr\" (UID: \"20fc1526-eb8d-424b-b03a-784154b5d7fa\") " pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:09 crc kubenswrapper[4760]: I1124 17:20:09.674943 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-bc766455b-9dfnr"
Nov 24 17:20:11 crc kubenswrapper[4760]: I1124 17:20:11.233626 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5"
Nov 24 17:20:11 crc kubenswrapper[4760]: I1124 17:20:11.320669 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-rlzqt"]
Nov 24 17:20:11 crc kubenswrapper[4760]: I1124 17:20:11.320925 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" podUID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerName="dnsmasq-dns" containerID="cri-o://0410d5251a6c6b2c9d450530fd6b105b023450f7cc1a05f86a666dd233bb0333" gracePeriod=10
Nov 24 17:20:12 crc kubenswrapper[4760]: I1124 17:20:12.298411 4760 generic.go:334] "Generic (PLEG): container finished" podID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerID="0410d5251a6c6b2c9d450530fd6b105b023450f7cc1a05f86a666dd233bb0333" exitCode=0
Nov 24 17:20:12 crc kubenswrapper[4760]: I1124 17:20:12.298455 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" event={"ID":"783a6396-635c-42d2-87b2-3c66d6b2bec0","Type":"ContainerDied","Data":"0410d5251a6c6b2c9d450530fd6b105b023450f7cc1a05f86a666dd233bb0333"}
Nov 24 17:20:13 crc kubenswrapper[4760]: I1124 17:20:13.321209 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" podUID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused"
Nov 24 17:20:18 crc kubenswrapper[4760]: I1124 17:20:18.321808 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" podUID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused"
Nov 24 17:20:19 crc kubenswrapper[4760]: E1124 17:20:19.717899 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified"
Nov 24 17:20:19 crc kubenswrapper[4760]: E1124 17:20:19.718224 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n85h686hbch5d5h588h54h659h7ch595h578h564h66hd7h678h556h565h54ch59dh5fdh5b6h577h58chd6h554h5c9h694h8bh568h66h6bh585h55q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c9wc5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5db6ccb479-g865g_openstack(136a5219-09ae-4a0c-a3d6-1007b0818546): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 17:20:19 crc kubenswrapper[4760]: E1124 17:20:19.719131 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified"
Nov 24 17:20:19 crc kubenswrapper[4760]: E1124 17:20:19.719615 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n7h544hd7hc6h586h9fh588h5bch58bh5fdh5b6h59fh67bhffh8h5d8hfch99h565h7fhdbhd5h68ch565h575h59h5bh56fh5f5hch4h56bq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7lrsl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-85696549bf-k4b6z_openstack(9e2b0ea2-3b39-49a5-b399-2d000325a743): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 17:20:19 crc kubenswrapper[4760]: E1124 17:20:19.721048 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified"
Nov 24 17:20:19 crc kubenswrapper[4760]: E1124 17:20:19.721343 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f4h68ch674h547h9hb7h687hb8h84h5c7h66h6fh688h85h5b7h79h78h9bh5d8h644h5dch587hbdh5b6h557h545hcbh55dh5b5h55bh59h79q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ds9jc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5b5747d5f-59ddv_openstack(5d36b538-97c1-45a9-9a4e-eb2c6d1eff46): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 17:20:19 crc kubenswrapper[4760]: E1124 17:20:19.721559 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-5db6ccb479-g865g" podUID="136a5219-09ae-4a0c-a3d6-1007b0818546"
Nov 24 17:20:19 crc kubenswrapper[4760]: E1124 17:20:19.722529 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-85696549bf-k4b6z" podUID="9e2b0ea2-3b39-49a5-b399-2d000325a743"
Nov 24 17:20:19 crc kubenswrapper[4760]: E1124 17:20:19.723576 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-5b5747d5f-59ddv" podUID="5d36b538-97c1-45a9-9a4e-eb2c6d1eff46"
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.827869 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-kltrw"
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.934977 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4z2c\" (UniqueName: \"kubernetes.io/projected/7fe4a46e-79cc-4a38-b108-aa1069ddf998-kube-api-access-j4z2c\") pod \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") "
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.935043 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-combined-ca-bundle\") pod \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") "
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.935102 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-fernet-keys\") pod \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") "
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.935137 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-credential-keys\") pod \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") "
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.935182 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-config-data\") pod \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") "
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.935205 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-scripts\") pod \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\" (UID: \"7fe4a46e-79cc-4a38-b108-aa1069ddf998\") "
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.942042 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-scripts" (OuterVolumeSpecName: "scripts") pod "7fe4a46e-79cc-4a38-b108-aa1069ddf998" (UID: "7fe4a46e-79cc-4a38-b108-aa1069ddf998"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.958919 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7fe4a46e-79cc-4a38-b108-aa1069ddf998" (UID: "7fe4a46e-79cc-4a38-b108-aa1069ddf998"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.959661 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fe4a46e-79cc-4a38-b108-aa1069ddf998-kube-api-access-j4z2c" (OuterVolumeSpecName: "kube-api-access-j4z2c") pod "7fe4a46e-79cc-4a38-b108-aa1069ddf998" (UID: "7fe4a46e-79cc-4a38-b108-aa1069ddf998"). InnerVolumeSpecName "kube-api-access-j4z2c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.959766 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "7fe4a46e-79cc-4a38-b108-aa1069ddf998" (UID: "7fe4a46e-79cc-4a38-b108-aa1069ddf998"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.963060 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-config-data" (OuterVolumeSpecName: "config-data") pod "7fe4a46e-79cc-4a38-b108-aa1069ddf998" (UID: "7fe4a46e-79cc-4a38-b108-aa1069ddf998"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:19 crc kubenswrapper[4760]: I1124 17:20:19.965994 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7fe4a46e-79cc-4a38-b108-aa1069ddf998" (UID: "7fe4a46e-79cc-4a38-b108-aa1069ddf998"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.038428 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4z2c\" (UniqueName: \"kubernetes.io/projected/7fe4a46e-79cc-4a38-b108-aa1069ddf998-kube-api-access-j4z2c\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.038480 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.038496 4760 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.038512 4760 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-credential-keys\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.038532 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.038546 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fe4a46e-79cc-4a38-b108-aa1069ddf998-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.373840 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kltrw" event={"ID":"7fe4a46e-79cc-4a38-b108-aa1069ddf998","Type":"ContainerDied","Data":"64b19e39b41779da4f745e81dc082316639c9c425ce6ce88c8a04002ebcc5d5b"}
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.373890 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64b19e39b41779da4f745e81dc082316639c9c425ce6ce88c8a04002ebcc5d5b"
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.373915 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-kltrw"
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.904535 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-kltrw"]
Nov 24 17:20:20 crc kubenswrapper[4760]: I1124 17:20:20.914350 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-kltrw"]
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.018206 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-qv7gv"]
Nov 24 17:20:21 crc kubenswrapper[4760]: E1124 17:20:21.018617 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fe4a46e-79cc-4a38-b108-aa1069ddf998" containerName="keystone-bootstrap"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.018632 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fe4a46e-79cc-4a38-b108-aa1069ddf998" containerName="keystone-bootstrap"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.018889 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fe4a46e-79cc-4a38-b108-aa1069ddf998" containerName="keystone-bootstrap"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.019727 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.022641 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t62xm"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.022844 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.023273 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.023503 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.023745 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.029598 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qv7gv"]
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.156600 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-combined-ca-bundle\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.156836 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-config-data\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.156975 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-scripts\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.157045 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-credential-keys\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.157218 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ng6r\" (UniqueName: \"kubernetes.io/projected/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-kube-api-access-4ng6r\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.157395 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-fernet-keys\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.259443 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-combined-ca-bundle\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.259703 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-config-data\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.259796 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-scripts\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.259876 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-credential-keys\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.259988 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ng6r\" (UniqueName: \"kubernetes.io/projected/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-kube-api-access-4ng6r\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.260081 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-fernet-keys\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.264712 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-credential-keys\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.277656 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-config-data\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.278300 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-fernet-keys\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.281201 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ng6r\" (UniqueName: \"kubernetes.io/projected/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-kube-api-access-4ng6r\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.290302 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-scripts\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.290951 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-combined-ca-bundle\") pod \"keystone-bootstrap-qv7gv\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.353166 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qv7gv"
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.385820 4760 generic.go:334] "Generic (PLEG): container finished" podID="1080c24b-f48e-4150-b4cb-c0b1bf1081e3" containerID="77b1a9c3f941b58bd531ae2d0729aad1e639c502dfae3b6abb37ab3aaf918921" exitCode=0
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.385866 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8x9fd" event={"ID":"1080c24b-f48e-4150-b4cb-c0b1bf1081e3","Type":"ContainerDied","Data":"77b1a9c3f941b58bd531ae2d0729aad1e639c502dfae3b6abb37ab3aaf918921"}
Nov 24 17:20:21 crc kubenswrapper[4760]: I1124 17:20:21.477589 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fe4a46e-79cc-4a38-b108-aa1069ddf998" path="/var/lib/kubelet/pods/7fe4a46e-79cc-4a38-b108-aa1069ddf998/volumes"
Nov 24 17:20:26 crc kubenswrapper[4760]: E1124 17:20:26.921497 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified"
Nov 24 17:20:26 crc kubenswrapper[4760]: E1124 17:20:26.922191 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n95h5f8h555h68bhcfh5bdh595h67dhbfh5dch7h64bh8h56ch677h68dhb4h5fch574h5d7h5h57h657hb8h648h698h5b4h54bh56chc8hbbh9bq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hpnjs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(36365905-cfb1-42e4-8e94-c586e1835c60): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.063439 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.071354 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.076669 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt"
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.094659 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85696549bf-k4b6z"
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.095217 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-8x9fd"
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.118994 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b5747d5f-59ddv"
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.119989 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5db6ccb479-g865g"
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.176934 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-svc\") pod \"783a6396-635c-42d2-87b2-3c66d6b2bec0\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.176996 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-combined-ca-bundle\") pod \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177034 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9e2b0ea2-3b39-49a5-b399-2d000325a743-horizon-secret-key\") pod \"9e2b0ea2-3b39-49a5-b399-2d000325a743\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177059 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-config\") pod \"783a6396-635c-42d2-87b2-3c66d6b2bec0\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177085 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-internal-tls-certs\") pod \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177115 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-scripts\") pod \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177149 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-scripts\") pod \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177188 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-logs\") pod \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177211 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-logs\") pod \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177255 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-config-data\") pod \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177274 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-config\") pod \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177297 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lrsl\" (UniqueName: \"kubernetes.io/projected/9e2b0ea2-3b39-49a5-b399-2d000325a743-kube-api-access-7lrsl\") pod \"9e2b0ea2-3b39-49a5-b399-2d000325a743\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177312 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-combined-ca-bundle\") pod \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177330 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-swift-storage-0\") pod \"783a6396-635c-42d2-87b2-3c66d6b2bec0\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177359 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndzff\" (UniqueName: \"kubernetes.io/projected/c5dafeef-ad07-4a83-97e8-ab8ae557a002-kube-api-access-ndzff\") pod \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177378 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e2b0ea2-3b39-49a5-b399-2d000325a743-logs\") pod \"9e2b0ea2-3b39-49a5-b399-2d000325a743\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177393 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q55qw\" (UniqueName: \"kubernetes.io/projected/f91e8f93-94f8-427b-85cc-aaa3b930f03c-kube-api-access-q55qw\") pod \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177415 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjwg7\" (UniqueName: \"kubernetes.io/projected/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-kube-api-access-cjwg7\") pod \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\" (UID: \"1080c24b-f48e-4150-b4cb-c0b1bf1081e3\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177458 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-httpd-run\") pod \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177482 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-httpd-run\") pod \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177506 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177530 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-scripts\") pod \"9e2b0ea2-3b39-49a5-b399-2d000325a743\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177552 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\" (UID: \"f91e8f93-94f8-427b-85cc-aaa3b930f03c\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177578 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-combined-ca-bundle\") pod \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177611 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-sb\") pod \"783a6396-635c-42d2-87b2-3c66d6b2bec0\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177629 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgxn8\" (UniqueName: \"kubernetes.io/projected/783a6396-635c-42d2-87b2-3c66d6b2bec0-kube-api-access-bgxn8\") pod \"783a6396-635c-42d2-87b2-3c66d6b2bec0\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177665 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-config-data\") pod \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177680 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-config-data\") pod \"9e2b0ea2-3b39-49a5-b399-2d000325a743\" (UID: \"9e2b0ea2-3b39-49a5-b399-2d000325a743\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177704 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-public-tls-certs\") pod \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.177727 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-nb\") pod \"783a6396-635c-42d2-87b2-3c66d6b2bec0\" (UID: \"783a6396-635c-42d2-87b2-3c66d6b2bec0\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.178725 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c5dafeef-ad07-4a83-97e8-ab8ae557a002" (UID: "c5dafeef-ad07-4a83-97e8-ab8ae557a002"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.179048 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e2b0ea2-3b39-49a5-b399-2d000325a743-logs" (OuterVolumeSpecName: "logs") pod "9e2b0ea2-3b39-49a5-b399-2d000325a743" (UID: "9e2b0ea2-3b39-49a5-b399-2d000325a743"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.181557 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-logs" (OuterVolumeSpecName: "logs") pod "c5dafeef-ad07-4a83-97e8-ab8ae557a002" (UID: "c5dafeef-ad07-4a83-97e8-ab8ae557a002"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.182141 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f91e8f93-94f8-427b-85cc-aaa3b930f03c-kube-api-access-q55qw" (OuterVolumeSpecName: "kube-api-access-q55qw") pod "f91e8f93-94f8-427b-85cc-aaa3b930f03c" (UID: "f91e8f93-94f8-427b-85cc-aaa3b930f03c"). InnerVolumeSpecName "kube-api-access-q55qw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.184709 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-kube-api-access-cjwg7" (OuterVolumeSpecName: "kube-api-access-cjwg7") pod "1080c24b-f48e-4150-b4cb-c0b1bf1081e3" (UID: "1080c24b-f48e-4150-b4cb-c0b1bf1081e3"). InnerVolumeSpecName "kube-api-access-cjwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.185286 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-scripts" (OuterVolumeSpecName: "scripts") pod "c5dafeef-ad07-4a83-97e8-ab8ae557a002" (UID: "c5dafeef-ad07-4a83-97e8-ab8ae557a002"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.185549 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-logs" (OuterVolumeSpecName: "logs") pod "f91e8f93-94f8-427b-85cc-aaa3b930f03c" (UID: "f91e8f93-94f8-427b-85cc-aaa3b930f03c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.185957 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5dafeef-ad07-4a83-97e8-ab8ae557a002-kube-api-access-ndzff" (OuterVolumeSpecName: "kube-api-access-ndzff") pod "c5dafeef-ad07-4a83-97e8-ab8ae557a002" (UID: "c5dafeef-ad07-4a83-97e8-ab8ae557a002"). InnerVolumeSpecName "kube-api-access-ndzff". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.185969 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f91e8f93-94f8-427b-85cc-aaa3b930f03c" (UID: "f91e8f93-94f8-427b-85cc-aaa3b930f03c"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.186677 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-scripts" (OuterVolumeSpecName: "scripts") pod "9e2b0ea2-3b39-49a5-b399-2d000325a743" (UID: "9e2b0ea2-3b39-49a5-b399-2d000325a743"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.188474 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-config-data" (OuterVolumeSpecName: "config-data") pod "9e2b0ea2-3b39-49a5-b399-2d000325a743" (UID: "9e2b0ea2-3b39-49a5-b399-2d000325a743"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.189649 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-scripts" (OuterVolumeSpecName: "scripts") pod "f91e8f93-94f8-427b-85cc-aaa3b930f03c" (UID: "f91e8f93-94f8-427b-85cc-aaa3b930f03c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.195044 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/783a6396-635c-42d2-87b2-3c66d6b2bec0-kube-api-access-bgxn8" (OuterVolumeSpecName: "kube-api-access-bgxn8") pod "783a6396-635c-42d2-87b2-3c66d6b2bec0" (UID: "783a6396-635c-42d2-87b2-3c66d6b2bec0"). InnerVolumeSpecName "kube-api-access-bgxn8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.198497 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e2b0ea2-3b39-49a5-b399-2d000325a743-kube-api-access-7lrsl" (OuterVolumeSpecName: "kube-api-access-7lrsl") pod "9e2b0ea2-3b39-49a5-b399-2d000325a743" (UID: "9e2b0ea2-3b39-49a5-b399-2d000325a743"). InnerVolumeSpecName "kube-api-access-7lrsl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.207666 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "c5dafeef-ad07-4a83-97e8-ab8ae557a002" (UID: "c5dafeef-ad07-4a83-97e8-ab8ae557a002"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.207879 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e2b0ea2-3b39-49a5-b399-2d000325a743-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "9e2b0ea2-3b39-49a5-b399-2d000325a743" (UID: "9e2b0ea2-3b39-49a5-b399-2d000325a743"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.214191 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "f91e8f93-94f8-427b-85cc-aaa3b930f03c" (UID: "f91e8f93-94f8-427b-85cc-aaa3b930f03c"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.252687 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1080c24b-f48e-4150-b4cb-c0b1bf1081e3" (UID: "1080c24b-f48e-4150-b4cb-c0b1bf1081e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.263671 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c5dafeef-ad07-4a83-97e8-ab8ae557a002" (UID: "c5dafeef-ad07-4a83-97e8-ab8ae557a002"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.264060 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-config" (OuterVolumeSpecName: "config") pod "1080c24b-f48e-4150-b4cb-c0b1bf1081e3" (UID: "1080c24b-f48e-4150-b4cb-c0b1bf1081e3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.268491 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f91e8f93-94f8-427b-85cc-aaa3b930f03c" (UID: "f91e8f93-94f8-427b-85cc-aaa3b930f03c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.272669 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "783a6396-635c-42d2-87b2-3c66d6b2bec0" (UID: "783a6396-635c-42d2-87b2-3c66d6b2bec0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.277159 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5dafeef-ad07-4a83-97e8-ab8ae557a002" (UID: "c5dafeef-ad07-4a83-97e8-ab8ae557a002"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.278576 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-config-data" (OuterVolumeSpecName: "config-data") pod "c5dafeef-ad07-4a83-97e8-ab8ae557a002" (UID: "c5dafeef-ad07-4a83-97e8-ab8ae557a002"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.278989 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ds9jc\" (UniqueName: \"kubernetes.io/projected/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-kube-api-access-ds9jc\") pod \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279069 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-scripts\") pod \"136a5219-09ae-4a0c-a3d6-1007b0818546\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279093 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-config-data\") pod \"136a5219-09ae-4a0c-a3d6-1007b0818546\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279119 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/136a5219-09ae-4a0c-a3d6-1007b0818546-horizon-secret-key\") pod \"136a5219-09ae-4a0c-a3d6-1007b0818546\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279170 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/136a5219-09ae-4a0c-a3d6-1007b0818546-logs\") pod \"136a5219-09ae-4a0c-a3d6-1007b0818546\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279245 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-scripts\") pod \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279282 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-logs\") pod \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279302 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-config-data\") pod \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\" (UID: \"c5dafeef-ad07-4a83-97e8-ab8ae557a002\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279722 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-config-data\") pod \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279765 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-horizon-secret-key\") pod \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\" (UID: \"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279785 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9wc5\" (UniqueName: \"kubernetes.io/projected/136a5219-09ae-4a0c-a3d6-1007b0818546-kube-api-access-c9wc5\") pod \"136a5219-09ae-4a0c-a3d6-1007b0818546\" (UID: \"136a5219-09ae-4a0c-a3d6-1007b0818546\") "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.279900 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/136a5219-09ae-4a0c-a3d6-1007b0818546-logs" (OuterVolumeSpecName: "logs") pod "136a5219-09ae-4a0c-a3d6-1007b0818546" (UID: "136a5219-09ae-4a0c-a3d6-1007b0818546"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: W1124 17:20:27.279996 4760 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/c5dafeef-ad07-4a83-97e8-ab8ae557a002/volumes/kubernetes.io~secret/config-data
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280036 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-config-data" (OuterVolumeSpecName: "config-data") pod "c5dafeef-ad07-4a83-97e8-ab8ae557a002" (UID: "c5dafeef-ad07-4a83-97e8-ab8ae557a002"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280128 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-scripts" (OuterVolumeSpecName: "scripts") pod "5d36b538-97c1-45a9-9a4e-eb2c6d1eff46" (UID: "5d36b538-97c1-45a9-9a4e-eb2c6d1eff46"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280516 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-config-data" (OuterVolumeSpecName: "config-data") pod "136a5219-09ae-4a0c-a3d6-1007b0818546" (UID: "136a5219-09ae-4a0c-a3d6-1007b0818546"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280312 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndzff\" (UniqueName: \"kubernetes.io/projected/c5dafeef-ad07-4a83-97e8-ab8ae557a002-kube-api-access-ndzff\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280587 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q55qw\" (UniqueName: \"kubernetes.io/projected/f91e8f93-94f8-427b-85cc-aaa3b930f03c-kube-api-access-q55qw\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280602 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e2b0ea2-3b39-49a5-b399-2d000325a743-logs\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280618 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjwg7\" (UniqueName: \"kubernetes.io/projected/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-kube-api-access-cjwg7\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280631 4760 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280644 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/136a5219-09ae-4a0c-a3d6-1007b0818546-logs\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280656 4760 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280776 4760 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" "
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280799 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280237 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-logs" (OuterVolumeSpecName: "logs") pod "5d36b538-97c1-45a9-9a4e-eb2c6d1eff46" (UID: "5d36b538-97c1-45a9-9a4e-eb2c6d1eff46"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280834 4760 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280852 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280866 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgxn8\" (UniqueName: \"kubernetes.io/projected/783a6396-635c-42d2-87b2-3c66d6b2bec0-kube-api-access-bgxn8\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280878 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280891 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9e2b0ea2-3b39-49a5-b399-2d000325a743-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280903 4760 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280923 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280935 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280948 4760 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9e2b0ea2-3b39-49a5-b399-2d000325a743-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280962 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5dafeef-ad07-4a83-97e8-ab8ae557a002-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280973 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280984 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5dafeef-ad07-4a83-97e8-ab8ae557a002-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280995 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f91e8f93-94f8-427b-85cc-aaa3b930f03c-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.281021 4760 reconciler_common.go:293] "Volume 
detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1080c24b-f48e-4150-b4cb-c0b1bf1081e3-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.281033 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lrsl\" (UniqueName: \"kubernetes.io/projected/9e2b0ea2-3b39-49a5-b399-2d000325a743-kube-api-access-7lrsl\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.281046 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280488 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-config-data" (OuterVolumeSpecName: "config-data") pod "5d36b538-97c1-45a9-9a4e-eb2c6d1eff46" (UID: "5d36b538-97c1-45a9-9a4e-eb2c6d1eff46"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.280501 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-scripts" (OuterVolumeSpecName: "scripts") pod "136a5219-09ae-4a0c-a3d6-1007b0818546" (UID: "136a5219-09ae-4a0c-a3d6-1007b0818546"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.284922 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-kube-api-access-ds9jc" (OuterVolumeSpecName: "kube-api-access-ds9jc") pod "5d36b538-97c1-45a9-9a4e-eb2c6d1eff46" (UID: "5d36b538-97c1-45a9-9a4e-eb2c6d1eff46"). InnerVolumeSpecName "kube-api-access-ds9jc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.285507 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/136a5219-09ae-4a0c-a3d6-1007b0818546-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "136a5219-09ae-4a0c-a3d6-1007b0818546" (UID: "136a5219-09ae-4a0c-a3d6-1007b0818546"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.287568 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/136a5219-09ae-4a0c-a3d6-1007b0818546-kube-api-access-c9wc5" (OuterVolumeSpecName: "kube-api-access-c9wc5") pod "136a5219-09ae-4a0c-a3d6-1007b0818546" (UID: "136a5219-09ae-4a0c-a3d6-1007b0818546"). InnerVolumeSpecName "kube-api-access-c9wc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.288159 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "5d36b538-97c1-45a9-9a4e-eb2c6d1eff46" (UID: "5d36b538-97c1-45a9-9a4e-eb2c6d1eff46"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.290704 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "783a6396-635c-42d2-87b2-3c66d6b2bec0" (UID: "783a6396-635c-42d2-87b2-3c66d6b2bec0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.291076 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-config" (OuterVolumeSpecName: "config") pod "783a6396-635c-42d2-87b2-3c66d6b2bec0" (UID: "783a6396-635c-42d2-87b2-3c66d6b2bec0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.292322 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "783a6396-635c-42d2-87b2-3c66d6b2bec0" (UID: "783a6396-635c-42d2-87b2-3c66d6b2bec0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.301586 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "783a6396-635c-42d2-87b2-3c66d6b2bec0" (UID: "783a6396-635c-42d2-87b2-3c66d6b2bec0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.307943 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f91e8f93-94f8-427b-85cc-aaa3b930f03c" (UID: "f91e8f93-94f8-427b-85cc-aaa3b930f03c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.309636 4760 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.311327 4760 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.321777 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-config-data" (OuterVolumeSpecName: "config-data") pod "f91e8f93-94f8-427b-85cc-aaa3b930f03c" (UID: "f91e8f93-94f8-427b-85cc-aaa3b930f03c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.382953 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.382982 4760 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.382992 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9wc5\" (UniqueName: \"kubernetes.io/projected/136a5219-09ae-4a0c-a3d6-1007b0818546-kube-api-access-c9wc5\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383015 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383025 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ds9jc\" (UniqueName: \"kubernetes.io/projected/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-kube-api-access-ds9jc\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383035 4760 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383044 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383053 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/136a5219-09ae-4a0c-a3d6-1007b0818546-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383061 4760 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/136a5219-09ae-4a0c-a3d6-1007b0818546-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383069 4760 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383077 4760 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383084 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383093 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383101 
4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383108 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383116 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/783a6396-635c-42d2-87b2-3c66d6b2bec0-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.383124 4760 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f91e8f93-94f8-427b-85cc-aaa3b930f03c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.438582 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8x9fd" event={"ID":"1080c24b-f48e-4150-b4cb-c0b1bf1081e3","Type":"ContainerDied","Data":"2a91c4d134ac440bf47bd4ee140432daead502cf95a883a72de2fb66d1069754"} Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.438617 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a91c4d134ac440bf47bd4ee140432daead502cf95a883a72de2fb66d1069754" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.438683 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-8x9fd" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.449920 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85696549bf-k4b6z" event={"ID":"9e2b0ea2-3b39-49a5-b399-2d000325a743","Type":"ContainerDied","Data":"03199d6402cf4771451621f824be0c62cd0f79978bc85a8880a56f866233d711"} Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.450284 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85696549bf-k4b6z" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.452733 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b5747d5f-59ddv" event={"ID":"5d36b538-97c1-45a9-9a4e-eb2c6d1eff46","Type":"ContainerDied","Data":"201ba9a00b37baf8fa6feddafd688163a7541b6ce7938910ed7385049672826e"} Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.452871 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b5747d5f-59ddv" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.458190 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.458205 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f91e8f93-94f8-427b-85cc-aaa3b930f03c","Type":"ContainerDied","Data":"9b309b6506b54ebf876dbbf83562f65de15ce06ff8e98d9b52f8c028fc6815a0"} Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.460984 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" event={"ID":"783a6396-635c-42d2-87b2-3c66d6b2bec0","Type":"ContainerDied","Data":"f7883474d480dd996f29a191ed8f040afad2696f562886e41bc2dc7cb240fe62"} Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.461104 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.470349 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.473484 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5db6ccb479-g865g" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.483209 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c5dafeef-ad07-4a83-97e8-ab8ae557a002","Type":"ContainerDied","Data":"d28a38739015ac473eebbd64eb384ab5881a3d6eb142df9972c1d73ce70a8299"} Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.483318 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5db6ccb479-g865g" event={"ID":"136a5219-09ae-4a0c-a3d6-1007b0818546","Type":"ContainerDied","Data":"de24f54e3dd5dc8eab2fe760a218a8bf6e1dbbba4b4d6f7d3feb799676184bcd"} Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.566461 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.597813 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.631015 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85696549bf-k4b6z"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.639314 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:20:27 crc kubenswrapper[4760]: E1124 17:20:27.640187 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" containerName="glance-httpd" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.640268 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" containerName="glance-httpd" Nov 24 17:20:27 crc kubenswrapper[4760]: E1124 17:20:27.640336 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerName="dnsmasq-dns" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.640390 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerName="dnsmasq-dns" Nov 24 17:20:27 crc kubenswrapper[4760]: E1124 17:20:27.640459 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" containerName="glance-httpd" Nov 24 17:20:27 crc 
kubenswrapper[4760]: I1124 17:20:27.640514 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" containerName="glance-httpd" Nov 24 17:20:27 crc kubenswrapper[4760]: E1124 17:20:27.640574 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerName="init" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.640634 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerName="init" Nov 24 17:20:27 crc kubenswrapper[4760]: E1124 17:20:27.640695 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" containerName="glance-log" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.640742 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" containerName="glance-log" Nov 24 17:20:27 crc kubenswrapper[4760]: E1124 17:20:27.640797 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" containerName="glance-log" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.640847 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" containerName="glance-log" Nov 24 17:20:27 crc kubenswrapper[4760]: E1124 17:20:27.640901 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1080c24b-f48e-4150-b4cb-c0b1bf1081e3" containerName="neutron-db-sync" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.640954 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1080c24b-f48e-4150-b4cb-c0b1bf1081e3" containerName="neutron-db-sync" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.641228 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" containerName="glance-httpd" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.641359 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" containerName="glance-log" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.641512 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" containerName="glance-httpd" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.641624 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1080c24b-f48e-4150-b4cb-c0b1bf1081e3" containerName="neutron-db-sync" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.641755 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerName="dnsmasq-dns" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.641857 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" containerName="glance-log" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.643500 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.646299 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-85696549bf-k4b6z"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.648645 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2bglw" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.657893 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.658330 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.658503 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.671182 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.709497 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5db6ccb479-g865g"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.724038 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5db6ccb479-g865g"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.739244 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5b5747d5f-59ddv"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.746099 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5b5747d5f-59ddv"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.753276 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.760287 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.767420 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-rlzqt"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.774675 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-rlzqt"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.780690 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.782248 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.784333 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.787680 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.790537 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.804768 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.804835 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.804873 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.804906 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.804986 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.805041 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.805401 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-logs\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.805429 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-9c8hv\" (UniqueName: \"kubernetes.io/projected/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-kube-api-access-9c8hv\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907396 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907467 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-logs\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907501 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c8hv\" (UniqueName: \"kubernetes.io/projected/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-kube-api-access-9c8hv\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907530 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907564 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907592 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907616 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907645 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907665 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907693 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907730 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-scripts\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907754 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907777 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p42kz\" (UniqueName: \"kubernetes.io/projected/bbc7d5c8-3179-415e-925d-b8cc60152042-kube-api-access-p42kz\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907798 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-config-data\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907850 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-logs\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.907880 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.908054 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-logs\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.908664 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.908744 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.912289 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.912416 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.914482 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.915131 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.940959 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c8hv\" (UniqueName: \"kubernetes.io/projected/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-kube-api-access-9c8hv\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.947494 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " pod="openstack/glance-default-internal-api-0" Nov 24 17:20:27 crc kubenswrapper[4760]: I1124 17:20:27.987559 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.009171 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-logs\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.009612 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.009725 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.009844 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.009985 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-scripts\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.010114 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.010245 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p42kz\" (UniqueName: \"kubernetes.io/projected/bbc7d5c8-3179-415e-925d-b8cc60152042-kube-api-access-p42kz\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.010363 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-config-data\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.010046 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-logs\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 
17:20:28.011410 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.011654 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.018975 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.022544 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-scripts\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.027869 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.037291 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p42kz\" (UniqueName: \"kubernetes.io/projected/bbc7d5c8-3179-415e-925d-b8cc60152042-kube-api-access-p42kz\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.039105 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-config-data\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.043491 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.102069 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.286675 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-q7xqh"] Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.293466 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.302230 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-q7xqh"] Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.321287 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-74f6bcbc87-rlzqt" podUID="783a6396-635c-42d2-87b2-3c66d6b2bec0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: i/o timeout" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.415050 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-ff79c6b68-gb844"] Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.416367 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.417187 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9gwz\" (UniqueName: \"kubernetes.io/projected/43208e79-4554-46bc-a9b1-db65113acaed-kube-api-access-r9gwz\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.417251 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-svc\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.417311 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.417338 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.417359 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.417419 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-config\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.418834 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.421067 4760 
reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.421331 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-swvgc" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.423152 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ff79c6b68-gb844"] Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.425120 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.519563 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9gwz\" (UniqueName: \"kubernetes.io/projected/43208e79-4554-46bc-a9b1-db65113acaed-kube-api-access-r9gwz\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.519629 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-svc\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.519699 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.519723 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkl7k\" (UniqueName: \"kubernetes.io/projected/b7d09082-9811-494f-b9b7-146154ffb7d5-kube-api-access-gkl7k\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.519768 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.519787 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.520042 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-ovndb-tls-certs\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.520061 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-config\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.520080 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-config\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.520863 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-svc\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.521101 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.521140 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.521140 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.520273 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-combined-ca-bundle\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.521281 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-httpd-config\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.521410 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-config\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.537997 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9gwz\" (UniqueName: \"kubernetes.io/projected/43208e79-4554-46bc-a9b1-db65113acaed-kube-api-access-r9gwz\") pod \"dnsmasq-dns-55f844cf75-q7xqh\" (UID: 
\"43208e79-4554-46bc-a9b1-db65113acaed\") " pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.616492 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.622319 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-ovndb-tls-certs\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.622347 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-config\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.622375 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-combined-ca-bundle\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.622428 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-httpd-config\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.622481 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkl7k\" (UniqueName: \"kubernetes.io/projected/b7d09082-9811-494f-b9b7-146154ffb7d5-kube-api-access-gkl7k\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.626243 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-ovndb-tls-certs\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.626876 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-combined-ca-bundle\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.627477 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-httpd-config\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.628979 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-config\") pod \"neutron-ff79c6b68-gb844\" (UID: 
\"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.640254 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkl7k\" (UniqueName: \"kubernetes.io/projected/b7d09082-9811-494f-b9b7-146154ffb7d5-kube-api-access-gkl7k\") pod \"neutron-ff79c6b68-gb844\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:28 crc kubenswrapper[4760]: I1124 17:20:28.735145 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:29 crc kubenswrapper[4760]: I1124 17:20:29.475284 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="136a5219-09ae-4a0c-a3d6-1007b0818546" path="/var/lib/kubelet/pods/136a5219-09ae-4a0c-a3d6-1007b0818546/volumes" Nov 24 17:20:29 crc kubenswrapper[4760]: I1124 17:20:29.475705 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d36b538-97c1-45a9-9a4e-eb2c6d1eff46" path="/var/lib/kubelet/pods/5d36b538-97c1-45a9-9a4e-eb2c6d1eff46/volumes" Nov 24 17:20:29 crc kubenswrapper[4760]: I1124 17:20:29.476083 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="783a6396-635c-42d2-87b2-3c66d6b2bec0" path="/var/lib/kubelet/pods/783a6396-635c-42d2-87b2-3c66d6b2bec0/volumes" Nov 24 17:20:29 crc kubenswrapper[4760]: I1124 17:20:29.476664 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e2b0ea2-3b39-49a5-b399-2d000325a743" path="/var/lib/kubelet/pods/9e2b0ea2-3b39-49a5-b399-2d000325a743/volumes" Nov 24 17:20:29 crc kubenswrapper[4760]: I1124 17:20:29.477548 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5dafeef-ad07-4a83-97e8-ab8ae557a002" path="/var/lib/kubelet/pods/c5dafeef-ad07-4a83-97e8-ab8ae557a002/volumes" Nov 24 17:20:29 crc kubenswrapper[4760]: I1124 17:20:29.478387 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f91e8f93-94f8-427b-85cc-aaa3b930f03c" path="/var/lib/kubelet/pods/f91e8f93-94f8-427b-85cc-aaa3b930f03c/volumes" Nov 24 17:20:29 crc kubenswrapper[4760]: I1124 17:20:29.567467 4760 scope.go:117] "RemoveContainer" containerID="d8cc9ef4713673baeac1401c18c3ae281243cf8bd6073ad0a6d78a53971bc3ee" Nov 24 17:20:29 crc kubenswrapper[4760]: E1124 17:20:29.599450 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 24 17:20:29 crc kubenswrapper[4760]: E1124 17:20:29.599612 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
Nov 24 17:20:29 crc kubenswrapper[4760]: E1124 17:20:29.601119 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-gszfs" podUID="182b9849-0723-4fa8-bade-df2f05e6cf37"
Nov 24 17:20:29 crc kubenswrapper[4760]: I1124 17:20:29.783816 4760 scope.go:117] "RemoveContainer" containerID="cbb1a88197730d0367a94b4e8fe999cc8633a676d929854a98d516da4e061991"
Nov 24 17:20:29 crc kubenswrapper[4760]: I1124 17:20:29.885411 4760 scope.go:117] "RemoveContainer" containerID="0410d5251a6c6b2c9d450530fd6b105b023450f7cc1a05f86a666dd233bb0333"
Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.023543 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8565878c68-g58n7"]
Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.046336 4760 scope.go:117] "RemoveContainer" containerID="a58b25e00e07ff29add1b0379a67278dc07b9c1630bbccdb1f5924c9700af55d"
Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.084952 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-bc766455b-9dfnr"]
pods=["openstack/horizon-bc766455b-9dfnr"] Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.202339 4760 scope.go:117] "RemoveContainer" containerID="b6ee534cf8057eeccdaf62f7717670e80ae4cef7d2f4753d880993816ae073e2" Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.270510 4760 scope.go:117] "RemoveContainer" containerID="74c0ce70605e9d81beef6156ca21dce27986514785a5a84e9219d0201ee651d4" Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.521752 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z6w2f" event={"ID":"195eb4e3-2851-4742-ba6a-48f56b7ac231","Type":"ContainerStarted","Data":"52cf2a810a2d225ce7497def9d47942ca61f2f4a03d4bb7b42768af14d0b56b7"} Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.542840 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"99978461206f93941fdae109daf9e539be0cb0e8e8e501e548ac8d9b42e27a5f"} Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.552906 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-z6w2f" podStartSLOduration=5.589947692 podStartE2EDuration="30.552889534s" podCreationTimestamp="2025-11-24 17:20:00 +0000 UTC" firstStartedPulling="2025-11-24 17:20:01.977672462 +0000 UTC m=+997.300554012" lastFinishedPulling="2025-11-24 17:20:26.940614304 +0000 UTC m=+1022.263495854" observedRunningTime="2025-11-24 17:20:30.543866186 +0000 UTC m=+1025.866747736" watchObservedRunningTime="2025-11-24 17:20:30.552889534 +0000 UTC m=+1025.875771084" Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.555211 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-bkqxk" event={"ID":"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7","Type":"ContainerStarted","Data":"072fda572fe3f02f0334668bd4de8238727b58c9d912850c6bf6edf350d406d6"} Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.581656 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bc766455b-9dfnr" event={"ID":"20fc1526-eb8d-424b-b03a-784154b5d7fa","Type":"ContainerStarted","Data":"0c79c54f5434a976d08ceb4b414c2597ccf26c981841c57b036d5f632214e0a2"} Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.588934 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8565878c68-g58n7" event={"ID":"b2a0d3e8-0ad1-4397-abb3-0b0074b13103","Type":"ContainerStarted","Data":"e66aa640f083e6e01d4f2f0f170fd0fe6182d62849fd524299b8020c407e60d5"} Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.591239 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-bkqxk" podStartSLOduration=5.515654035 podStartE2EDuration="30.591218942s" podCreationTimestamp="2025-11-24 17:20:00 +0000 UTC" firstStartedPulling="2025-11-24 17:20:01.85365201 +0000 UTC m=+997.176533560" lastFinishedPulling="2025-11-24 17:20:26.929216927 +0000 UTC m=+1022.252098467" observedRunningTime="2025-11-24 17:20:30.586657011 +0000 UTC m=+1025.909538581" watchObservedRunningTime="2025-11-24 17:20:30.591218942 +0000 UTC m=+1025.914100492" Nov 24 17:20:30 crc kubenswrapper[4760]: E1124 17:20:30.606128 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" 
pod="openstack/cinder-db-sync-gszfs" podUID="182b9849-0723-4fa8-bade-df2f05e6cf37" Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.640815 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ff79c6b68-gb844"] Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.706822 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qv7gv"] Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.714449 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-q7xqh"] Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.791289 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:20:30 crc kubenswrapper[4760]: W1124 17:20:30.918534 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3eea1be1_6bb4_44b1_8f08_b4fc58bafc4a.slice/crio-f1fe61a3dceb958930a080d95f5c3f15c9d869a4dbd32443f284b155c158769c WatchSource:0}: Error finding container f1fe61a3dceb958930a080d95f5c3f15c9d869a4dbd32443f284b155c158769c: Status 404 returned error can't find the container with id f1fe61a3dceb958930a080d95f5c3f15c9d869a4dbd32443f284b155c158769c Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.926271 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7b5b8bc889-kqfhp"] Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.927788 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.932225 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.932938 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.940850 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7b5b8bc889-kqfhp"] Nov 24 17:20:30 crc kubenswrapper[4760]: I1124 17:20:30.983939 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.072166 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-config\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.072480 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7gwj\" (UniqueName: \"kubernetes.io/projected/f4823e15-ce2c-4a16-b80e-f676469b3624-kube-api-access-d7gwj\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.072523 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-combined-ca-bundle\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.072543 4760 
Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.072614 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-public-tls-certs\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp"
Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.072649 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-internal-tls-certs\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp"
Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.072670 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-ovndb-tls-certs\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp"
Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.174906 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-config\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp"
Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.174971 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7gwj\" (UniqueName: \"kubernetes.io/projected/f4823e15-ce2c-4a16-b80e-f676469b3624-kube-api-access-d7gwj\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp"
Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.175015 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-combined-ca-bundle\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp"
Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.175036 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-httpd-config\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp"
Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.175099 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-public-tls-certs\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp"
Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.175145 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-internal-tls-certs\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp"
started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-internal-tls-certs\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.175168 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-ovndb-tls-certs\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.179130 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-ovndb-tls-certs\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.179447 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-config\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.185662 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-internal-tls-certs\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.185959 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-httpd-config\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.186647 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-public-tls-certs\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.187916 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4823e15-ce2c-4a16-b80e-f676469b3624-combined-ca-bundle\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.196695 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7gwj\" (UniqueName: \"kubernetes.io/projected/f4823e15-ce2c-4a16-b80e-f676469b3624-kube-api-access-d7gwj\") pod \"neutron-7b5b8bc889-kqfhp\" (UID: \"f4823e15-ce2c-4a16-b80e-f676469b3624\") " pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.308164 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.520399 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.627307 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ff79c6b68-gb844" event={"ID":"b7d09082-9811-494f-b9b7-146154ffb7d5","Type":"ContainerStarted","Data":"482feb73186f30a1eb0e691c9d8d8b842cf9e9c8533a6f1938e17a9996dbc8d4"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.627844 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ff79c6b68-gb844" event={"ID":"b7d09082-9811-494f-b9b7-146154ffb7d5","Type":"ContainerStarted","Data":"47ec7b42a839442e2af4f32e96d041f2a9030c6920320a2cac3155cbe3340f4d"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.630914 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qv7gv" event={"ID":"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a","Type":"ContainerStarted","Data":"45238f4028a00df9cda45bab3360589679959357592d16bc6a0c416c828ad2bd"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.630964 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qv7gv" event={"ID":"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a","Type":"ContainerStarted","Data":"f1fe61a3dceb958930a080d95f5c3f15c9d869a4dbd32443f284b155c158769c"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.632701 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8565878c68-g58n7" event={"ID":"b2a0d3e8-0ad1-4397-abb3-0b0074b13103","Type":"ContainerStarted","Data":"00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.636499 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e","Type":"ContainerStarted","Data":"98f678075c2c88ba9f7423add0dc97615ab8f378b739aa511825751c39ae0326"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.638651 4760 generic.go:334] "Generic (PLEG): container finished" podID="43208e79-4554-46bc-a9b1-db65113acaed" containerID="48b33e4f25e1e93ab294fca0d0dec3da158f50a4b88ba757bbbe675ee871ce7e" exitCode=0 Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.638703 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" event={"ID":"43208e79-4554-46bc-a9b1-db65113acaed","Type":"ContainerDied","Data":"48b33e4f25e1e93ab294fca0d0dec3da158f50a4b88ba757bbbe675ee871ce7e"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.638730 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" event={"ID":"43208e79-4554-46bc-a9b1-db65113acaed","Type":"ContainerStarted","Data":"d3a82e71e45ec9f0e8f47d7f25eb1b7647c096b2ec7bcce329772b7f484792e4"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.656612 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bc766455b-9dfnr" event={"ID":"20fc1526-eb8d-424b-b03a-784154b5d7fa","Type":"ContainerStarted","Data":"6d5cf4efe751c8eb50dbb5a3fdcd4c17c3f01285be42fd0d574d76f5f541cceb"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.657338 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-qv7gv" podStartSLOduration=11.657321034 podStartE2EDuration="11.657321034s" 
podCreationTimestamp="2025-11-24 17:20:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:31.652170246 +0000 UTC m=+1026.975051796" watchObservedRunningTime="2025-11-24 17:20:31.657321034 +0000 UTC m=+1026.980202584" Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.659526 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bbc7d5c8-3179-415e-925d-b8cc60152042","Type":"ContainerStarted","Data":"300fb358ba0c6be43300898606c8b8777b6d10c419fd1913a917496607aeec66"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.666105 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"36365905-cfb1-42e4-8e94-c586e1835c60","Type":"ContainerStarted","Data":"5c8b31f23dcf809ae5cf99f6650d661c80ce89f0d065220e8d970e2e3ad1edb6"} Nov 24 17:20:31 crc kubenswrapper[4760]: I1124 17:20:31.878397 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7b5b8bc889-kqfhp"] Nov 24 17:20:31 crc kubenswrapper[4760]: W1124 17:20:31.911105 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4823e15_ce2c_4a16_b80e_f676469b3624.slice/crio-491ad3b35b540876200546872924fab4f3b97b6ac1ddeefba7e76dee517ff8b6 WatchSource:0}: Error finding container 491ad3b35b540876200546872924fab4f3b97b6ac1ddeefba7e76dee517ff8b6: Status 404 returned error can't find the container with id 491ad3b35b540876200546872924fab4f3b97b6ac1ddeefba7e76dee517ff8b6 Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.700108 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bbc7d5c8-3179-415e-925d-b8cc60152042","Type":"ContainerStarted","Data":"b6378aeec00227a8350f88f40782b4a4c74ca74596cd4c08ab17a27f891a08e9"} Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.700674 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bbc7d5c8-3179-415e-925d-b8cc60152042","Type":"ContainerStarted","Data":"7a8ef8a380b7e636ab00608764debb57fbc796e051d126785667d33c452e65f5"} Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.704927 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ff79c6b68-gb844" event={"ID":"b7d09082-9811-494f-b9b7-146154ffb7d5","Type":"ContainerStarted","Data":"dc07ac130418e8e6f6aedcb18da14ecf07b1ab15594caacc36b5427c0004dc3e"} Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.705408 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.706719 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8565878c68-g58n7" event={"ID":"b2a0d3e8-0ad1-4397-abb3-0b0074b13103","Type":"ContainerStarted","Data":"88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4"} Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.710463 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e","Type":"ContainerStarted","Data":"c2a379271a702638a4dfbf98271ae09ff4f6b0f061b0d6f428825c9673c6a27a"} Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.727250 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b5b8bc889-kqfhp" 
event={"ID":"f4823e15-ce2c-4a16-b80e-f676469b3624","Type":"ContainerStarted","Data":"80f837538b2156d30a8d78d94ba4995628ba7640f0b4b4009d9e8e3ddab73fa9"} Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.727290 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b5b8bc889-kqfhp" event={"ID":"f4823e15-ce2c-4a16-b80e-f676469b3624","Type":"ContainerStarted","Data":"491ad3b35b540876200546872924fab4f3b97b6ac1ddeefba7e76dee517ff8b6"} Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.728171 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.72816172 podStartE2EDuration="5.72816172s" podCreationTimestamp="2025-11-24 17:20:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:32.724013311 +0000 UTC m=+1028.046894861" watchObservedRunningTime="2025-11-24 17:20:32.72816172 +0000 UTC m=+1028.051043270" Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.735315 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" event={"ID":"43208e79-4554-46bc-a9b1-db65113acaed","Type":"ContainerStarted","Data":"e201d9ee7db36b81f4ff32d49bdeed8db2c9dc9d600fb5c4e539c9eb1df82c8b"} Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.735867 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.741434 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bc766455b-9dfnr" event={"ID":"20fc1526-eb8d-424b-b03a-784154b5d7fa","Type":"ContainerStarted","Data":"d4c64b628c12e9240d463e8d63d51168fd097c90d9a91b801601e8468ec80761"} Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.745745 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-ff79c6b68-gb844" podStartSLOduration=4.745729773 podStartE2EDuration="4.745729773s" podCreationTimestamp="2025-11-24 17:20:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:32.742951474 +0000 UTC m=+1028.065833014" watchObservedRunningTime="2025-11-24 17:20:32.745729773 +0000 UTC m=+1028.068611323" Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.766677 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-8565878c68-g58n7" podStartSLOduration=22.87545091 podStartE2EDuration="23.766662963s" podCreationTimestamp="2025-11-24 17:20:09 +0000 UTC" firstStartedPulling="2025-11-24 17:20:30.154952888 +0000 UTC m=+1025.477834438" lastFinishedPulling="2025-11-24 17:20:31.046164931 +0000 UTC m=+1026.369046491" observedRunningTime="2025-11-24 17:20:32.765402667 +0000 UTC m=+1028.088284217" watchObservedRunningTime="2025-11-24 17:20:32.766662963 +0000 UTC m=+1028.089544513" Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.800529 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" podStartSLOduration=4.800512692 podStartE2EDuration="4.800512692s" podCreationTimestamp="2025-11-24 17:20:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:32.794656244 +0000 UTC m=+1028.117537794" watchObservedRunningTime="2025-11-24 
17:20:32.800512692 +0000 UTC m=+1028.123394242" Nov 24 17:20:32 crc kubenswrapper[4760]: I1124 17:20:32.829770 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-bc766455b-9dfnr" podStartSLOduration=22.975227087 podStartE2EDuration="23.82975591s" podCreationTimestamp="2025-11-24 17:20:09 +0000 UTC" firstStartedPulling="2025-11-24 17:20:30.160563588 +0000 UTC m=+1025.483445138" lastFinishedPulling="2025-11-24 17:20:31.015092411 +0000 UTC m=+1026.337973961" observedRunningTime="2025-11-24 17:20:32.823747558 +0000 UTC m=+1028.146629098" watchObservedRunningTime="2025-11-24 17:20:32.82975591 +0000 UTC m=+1028.152637460" Nov 24 17:20:33 crc kubenswrapper[4760]: I1124 17:20:33.766107 4760 generic.go:334] "Generic (PLEG): container finished" podID="195eb4e3-2851-4742-ba6a-48f56b7ac231" containerID="52cf2a810a2d225ce7497def9d47942ca61f2f4a03d4bb7b42768af14d0b56b7" exitCode=0 Nov 24 17:20:33 crc kubenswrapper[4760]: I1124 17:20:33.766203 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z6w2f" event={"ID":"195eb4e3-2851-4742-ba6a-48f56b7ac231","Type":"ContainerDied","Data":"52cf2a810a2d225ce7497def9d47942ca61f2f4a03d4bb7b42768af14d0b56b7"} Nov 24 17:20:33 crc kubenswrapper[4760]: I1124 17:20:33.771163 4760 generic.go:334] "Generic (PLEG): container finished" podID="d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" containerID="072fda572fe3f02f0334668bd4de8238727b58c9d912850c6bf6edf350d406d6" exitCode=0 Nov 24 17:20:33 crc kubenswrapper[4760]: I1124 17:20:33.771270 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-bkqxk" event={"ID":"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7","Type":"ContainerDied","Data":"072fda572fe3f02f0334668bd4de8238727b58c9d912850c6bf6edf350d406d6"} Nov 24 17:20:33 crc kubenswrapper[4760]: I1124 17:20:33.777245 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e","Type":"ContainerStarted","Data":"6bd0b71f5ee0bbaa89d9bcec094f602d762a66a876da1dabbfc75c0fe3c47675"} Nov 24 17:20:33 crc kubenswrapper[4760]: I1124 17:20:33.791891 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b5b8bc889-kqfhp" event={"ID":"f4823e15-ce2c-4a16-b80e-f676469b3624","Type":"ContainerStarted","Data":"b7c6e8c5caf650048d740fec3850bac4f784745375c1495136758b4ce4f7b7ec"} Nov 24 17:20:33 crc kubenswrapper[4760]: I1124 17:20:33.828885 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.828858703 podStartE2EDuration="6.828858703s" podCreationTimestamp="2025-11-24 17:20:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:33.813251976 +0000 UTC m=+1029.136133526" watchObservedRunningTime="2025-11-24 17:20:33.828858703 +0000 UTC m=+1029.151740263" Nov 24 17:20:33 crc kubenswrapper[4760]: I1124 17:20:33.844641 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7b5b8bc889-kqfhp" podStartSLOduration=3.844615784 podStartE2EDuration="3.844615784s" podCreationTimestamp="2025-11-24 17:20:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:33.832178418 +0000 UTC m=+1029.155059968" watchObservedRunningTime="2025-11-24 17:20:33.844615784 +0000 UTC 
m=+1029.167497334" Nov 24 17:20:34 crc kubenswrapper[4760]: I1124 17:20:34.950878 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:20:35 crc kubenswrapper[4760]: I1124 17:20:35.964642 4760 generic.go:334] "Generic (PLEG): container finished" podID="3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" containerID="45238f4028a00df9cda45bab3360589679959357592d16bc6a0c416c828ad2bd" exitCode=0 Nov 24 17:20:35 crc kubenswrapper[4760]: I1124 17:20:35.964852 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qv7gv" event={"ID":"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a","Type":"ContainerDied","Data":"45238f4028a00df9cda45bab3360589679959357592d16bc6a0c416c828ad2bd"} Nov 24 17:20:37 crc kubenswrapper[4760]: I1124 17:20:37.988308 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-bkqxk" event={"ID":"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7","Type":"ContainerDied","Data":"fa06ad9eddff5deba2fb20bf0d248765bc4257c954b21c6fde642608a19d3b8c"} Nov 24 17:20:37 crc kubenswrapper[4760]: I1124 17:20:37.988814 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa06ad9eddff5deba2fb20bf0d248765bc4257c954b21c6fde642608a19d3b8c" Nov 24 17:20:37 crc kubenswrapper[4760]: I1124 17:20:37.988951 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:37 crc kubenswrapper[4760]: I1124 17:20:37.988979 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:37 crc kubenswrapper[4760]: I1124 17:20:37.995207 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qv7gv" event={"ID":"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a","Type":"ContainerDied","Data":"f1fe61a3dceb958930a080d95f5c3f15c9d869a4dbd32443f284b155c158769c"} Nov 24 17:20:37 crc kubenswrapper[4760]: I1124 17:20:37.995244 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1fe61a3dceb958930a080d95f5c3f15c9d869a4dbd32443f284b155c158769c" Nov 24 17:20:37 crc kubenswrapper[4760]: I1124 17:20:37.997031 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z6w2f" event={"ID":"195eb4e3-2851-4742-ba6a-48f56b7ac231","Type":"ContainerDied","Data":"f7cd11c486a99c41573a3c9a9b7b2c3c812735de872429e4031868b1c2746d8d"} Nov 24 17:20:37 crc kubenswrapper[4760]: I1124 17:20:37.997056 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7cd11c486a99c41573a3c9a9b7b2c3c812735de872429e4031868b1c2746d8d" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.032244 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.048975 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.087344 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.093505 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.101197 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qv7gv" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.102811 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.102856 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.164330 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.170379 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.208616 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxppc\" (UniqueName: \"kubernetes.io/projected/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-kube-api-access-hxppc\") pod \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.208679 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-db-sync-config-data\") pod \"195eb4e3-2851-4742-ba6a-48f56b7ac231\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.208709 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-combined-ca-bundle\") pod \"195eb4e3-2851-4742-ba6a-48f56b7ac231\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.208758 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-credential-keys\") pod \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.208798 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-config-data\") pod \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.208866 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4qtf\" (UniqueName: \"kubernetes.io/projected/195eb4e3-2851-4742-ba6a-48f56b7ac231-kube-api-access-c4qtf\") pod \"195eb4e3-2851-4742-ba6a-48f56b7ac231\" (UID: \"195eb4e3-2851-4742-ba6a-48f56b7ac231\") " Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.208916 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ng6r\" (UniqueName: \"kubernetes.io/projected/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-kube-api-access-4ng6r\") pod \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") " Nov 24 17:20:38 crc kubenswrapper[4760]: 
Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.209047 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-fernet-keys\") pod \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") "
Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.209078 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-logs\") pod \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") "
Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.209107 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-config-data\") pod \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") "
Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.209127 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-combined-ca-bundle\") pod \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") "
Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.209146 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-scripts\") pod \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\" (UID: \"3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a\") "
Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.209166 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-scripts\") pod \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") "
Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.211340 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-logs" (OuterVolumeSpecName: "logs") pod "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" (UID: "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.214688 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "195eb4e3-2851-4742-ba6a-48f56b7ac231" (UID: "195eb4e3-2851-4742-ba6a-48f56b7ac231"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.217650 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/195eb4e3-2851-4742-ba6a-48f56b7ac231-kube-api-access-c4qtf" (OuterVolumeSpecName: "kube-api-access-c4qtf") pod "195eb4e3-2851-4742-ba6a-48f56b7ac231" (UID: "195eb4e3-2851-4742-ba6a-48f56b7ac231"). InnerVolumeSpecName "kube-api-access-c4qtf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.219481 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-scripts" (OuterVolumeSpecName: "scripts") pod "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" (UID: "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.221981 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" (UID: "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.223025 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" (UID: "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.223367 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-kube-api-access-4ng6r" (OuterVolumeSpecName: "kube-api-access-4ng6r") pod "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" (UID: "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a"). InnerVolumeSpecName "kube-api-access-4ng6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.237150 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-scripts" (OuterVolumeSpecName: "scripts") pod "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" (UID: "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.240498 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-kube-api-access-hxppc" (OuterVolumeSpecName: "kube-api-access-hxppc") pod "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" (UID: "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7"). InnerVolumeSpecName "kube-api-access-hxppc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.243369 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" (UID: "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.246902 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "195eb4e3-2851-4742-ba6a-48f56b7ac231" (UID: "195eb4e3-2851-4742-ba6a-48f56b7ac231"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: E1124 17:20:38.250688 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-combined-ca-bundle podName:d21ffd86-bec8-47a2-940e-fc1fcf5d32c7 nodeName:}" failed. No retries permitted until 2025-11-24 17:20:38.750652576 +0000 UTC m=+1034.073534126 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-combined-ca-bundle") pod "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" (UID: "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7") : error deleting /var/lib/kubelet/pods/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7/volume-subpaths: remove /var/lib/kubelet/pods/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7/volume-subpaths: no such file or directory Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.252904 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-config-data" (OuterVolumeSpecName: "config-data") pod "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" (UID: "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.255381 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-config-data" (OuterVolumeSpecName: "config-data") pod "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" (UID: "3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310774 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4qtf\" (UniqueName: \"kubernetes.io/projected/195eb4e3-2851-4742-ba6a-48f56b7ac231-kube-api-access-c4qtf\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310809 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ng6r\" (UniqueName: \"kubernetes.io/projected/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-kube-api-access-4ng6r\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310817 4760 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310826 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310834 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310846 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310854 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310862 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310873 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxppc\" (UniqueName: \"kubernetes.io/projected/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-kube-api-access-hxppc\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310881 4760 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310888 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/195eb4e3-2851-4742-ba6a-48f56b7ac231-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310897 4760 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.310905 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:38 crc kubenswrapper[4760]: 
I1124 17:20:38.618287 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.675955 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lkmt5"] Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.676539 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" podUID="c84af16b-fab3-4d5c-bb27-4e04ad255e74" containerName="dnsmasq-dns" containerID="cri-o://32827c9c51625aac00a4aa0dd129e4a22aa8eb971254c98717bc20267e5fdc7a" gracePeriod=10 Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.819369 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-combined-ca-bundle\") pod \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\" (UID: \"d21ffd86-bec8-47a2-940e-fc1fcf5d32c7\") " Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.823645 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" (UID: "d21ffd86-bec8-47a2-940e-fc1fcf5d32c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:38 crc kubenswrapper[4760]: I1124 17:20:38.921429 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.013103 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"36365905-cfb1-42e4-8e94-c586e1835c60","Type":"ContainerStarted","Data":"4ceadab664fbbdc018837f0ec1387ba50226496ced2a45762f73314add03ee9a"} Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.015289 4760 generic.go:334] "Generic (PLEG): container finished" podID="c84af16b-fab3-4d5c-bb27-4e04ad255e74" containerID="32827c9c51625aac00a4aa0dd129e4a22aa8eb971254c98717bc20267e5fdc7a" exitCode=0 Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.015390 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-bkqxk" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.028387 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" event={"ID":"c84af16b-fab3-4d5c-bb27-4e04ad255e74","Type":"ContainerDied","Data":"32827c9c51625aac00a4aa0dd129e4a22aa8eb971254c98717bc20267e5fdc7a"} Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.028686 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qv7gv" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.028751 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-z6w2f" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.029859 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.030318 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.030347 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.030552 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.199223 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.291987 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5ccbbc7984-m6jkp"] Nov 24 17:20:39 crc kubenswrapper[4760]: E1124 17:20:39.292326 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" containerName="keystone-bootstrap" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.292341 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" containerName="keystone-bootstrap" Nov 24 17:20:39 crc kubenswrapper[4760]: E1124 17:20:39.292362 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="195eb4e3-2851-4742-ba6a-48f56b7ac231" containerName="barbican-db-sync" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.292368 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="195eb4e3-2851-4742-ba6a-48f56b7ac231" containerName="barbican-db-sync" Nov 24 17:20:39 crc kubenswrapper[4760]: E1124 17:20:39.292377 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c84af16b-fab3-4d5c-bb27-4e04ad255e74" containerName="init" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.292382 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="c84af16b-fab3-4d5c-bb27-4e04ad255e74" containerName="init" Nov 24 17:20:39 crc kubenswrapper[4760]: E1124 17:20:39.292393 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c84af16b-fab3-4d5c-bb27-4e04ad255e74" containerName="dnsmasq-dns" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.292399 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="c84af16b-fab3-4d5c-bb27-4e04ad255e74" containerName="dnsmasq-dns" Nov 24 17:20:39 crc kubenswrapper[4760]: E1124 17:20:39.292412 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" containerName="placement-db-sync" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.292418 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" containerName="placement-db-sync" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.292571 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="c84af16b-fab3-4d5c-bb27-4e04ad255e74" containerName="dnsmasq-dns" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.292584 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" containerName="keystone-bootstrap" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 
17:20:39.292593 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="195eb4e3-2851-4742-ba6a-48f56b7ac231" containerName="barbican-db-sync" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.292602 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" containerName="placement-db-sync" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.293126 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.298741 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.298858 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.300600 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.300863 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.301058 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t62xm" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.315348 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5ccbbc7984-m6jkp"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.316478 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.323769 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-8cf89787b-dxmqp"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.329327 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.342800 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-nb\") pod \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.343111 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-svc\") pod \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.343226 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-sb\") pod \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.343249 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-swift-storage-0\") pod \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.343329 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9q7c\" (UniqueName: \"kubernetes.io/projected/c84af16b-fab3-4d5c-bb27-4e04ad255e74-kube-api-access-t9q7c\") pod \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.343356 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-config\") pod \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\" (UID: \"c84af16b-fab3-4d5c-bb27-4e04ad255e74\") " Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.344761 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.344966 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.345187 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-cx5qk" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.345319 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.345424 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.363227 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c84af16b-fab3-4d5c-bb27-4e04ad255e74-kube-api-access-t9q7c" (OuterVolumeSpecName: "kube-api-access-t9q7c") pod "c84af16b-fab3-4d5c-bb27-4e04ad255e74" (UID: "c84af16b-fab3-4d5c-bb27-4e04ad255e74"). InnerVolumeSpecName "kube-api-access-t9q7c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.377589 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8cf89787b-dxmqp"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.445978 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-internal-tls-certs\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446049 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-public-tls-certs\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446080 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412c6295-ae70-4706-9c7b-88c4025c9579-logs\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446107 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-combined-ca-bundle\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446126 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-credential-keys\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446144 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-combined-ca-bundle\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446163 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-scripts\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446183 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-fernet-keys\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446202 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-9jfm2\" (UniqueName: \"kubernetes.io/projected/412c6295-ae70-4706-9c7b-88c4025c9579-kube-api-access-9jfm2\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446260 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-config-data\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446304 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-config-data\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446331 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-scripts\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446370 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-internal-tls-certs\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446418 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-public-tls-certs\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446437 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7znvj\" (UniqueName: \"kubernetes.io/projected/1fd753d3-759a-4734-96ac-9c7f1a9138fa-kube-api-access-7znvj\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.446492 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9q7c\" (UniqueName: \"kubernetes.io/projected/c84af16b-fab3-4d5c-bb27-4e04ad255e74-kube-api-access-t9q7c\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.464141 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-bc9c99c9f-fs95m"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.465688 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.475417 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.475778 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.475881 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-4hdsn" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.521605 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.523144 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.542393 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c84af16b-fab3-4d5c-bb27-4e04ad255e74" (UID: "c84af16b-fab3-4d5c-bb27-4e04ad255e74"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.547812 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550295 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-combined-ca-bundle\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550342 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-public-tls-certs\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550363 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7znvj\" (UniqueName: \"kubernetes.io/projected/1fd753d3-759a-4734-96ac-9c7f1a9138fa-kube-api-access-7znvj\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550383 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-internal-tls-certs\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550400 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-config-data\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " 
pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550428 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-public-tls-certs\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550448 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412c6295-ae70-4706-9c7b-88c4025c9579-logs\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550467 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-combined-ca-bundle\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550481 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-credential-keys\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550500 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-948qg\" (UniqueName: \"kubernetes.io/projected/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-kube-api-access-948qg\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550521 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-combined-ca-bundle\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550536 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-scripts\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550557 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-fernet-keys\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550580 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jfm2\" (UniqueName: \"kubernetes.io/projected/412c6295-ae70-4706-9c7b-88c4025c9579-kube-api-access-9jfm2\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc 
kubenswrapper[4760]: I1124 17:20:39.550606 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-config-data\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550652 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-config-data\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550679 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-scripts\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550699 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-logs\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550714 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-config-data-custom\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550733 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c84af16b-fab3-4d5c-bb27-4e04ad255e74" (UID: "c84af16b-fab3-4d5c-bb27-4e04ad255e74"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550748 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-internal-tls-certs\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550846 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.550863 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.552903 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412c6295-ae70-4706-9c7b-88c4025c9579-logs\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.556212 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-bc9c99c9f-fs95m"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.560562 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-config" (OuterVolumeSpecName: "config") pod "c84af16b-fab3-4d5c-bb27-4e04ad255e74" (UID: "c84af16b-fab3-4d5c-bb27-4e04ad255e74"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.567397 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-internal-tls-certs\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.567886 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-combined-ca-bundle\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.570175 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-public-tls-certs\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.570666 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-fernet-keys\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.570968 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-credential-keys\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.571418 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-combined-ca-bundle\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.572886 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-internal-tls-certs\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.573440 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-config-data\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.573723 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-scripts\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.576614 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-public-tls-certs\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.587689 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fd753d3-759a-4734-96ac-9c7f1a9138fa-config-data\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.588755 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/412c6295-ae70-4706-9c7b-88c4025c9579-scripts\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.588906 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.599417 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.600684 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.603575 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-k79jn"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.604953 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.612491 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c84af16b-fab3-4d5c-bb27-4e04ad255e74" (UID: "c84af16b-fab3-4d5c-bb27-4e04ad255e74"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.615242 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7znvj\" (UniqueName: \"kubernetes.io/projected/1fd753d3-759a-4734-96ac-9c7f1a9138fa-kube-api-access-7znvj\") pod \"keystone-5ccbbc7984-m6jkp\" (UID: \"1fd753d3-759a-4734-96ac-9c7f1a9138fa\") " pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.618825 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jfm2\" (UniqueName: \"kubernetes.io/projected/412c6295-ae70-4706-9c7b-88c4025c9579-kube-api-access-9jfm2\") pod \"placement-8cf89787b-dxmqp\" (UID: \"412c6295-ae70-4706-9c7b-88c4025c9579\") " pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.633776 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-k79jn"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.659529 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd4e39bc-5c35-4906-906a-a5558f2861de-logs\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.659812 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-logs\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.659896 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-config-data-custom\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.664205 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-logs\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.664817 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd4e39bc-5c35-4906-906a-a5558f2861de-config-data\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.665027 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-combined-ca-bundle\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.665238 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-config-data\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.665704 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nrlt\" (UniqueName: \"kubernetes.io/projected/bd4e39bc-5c35-4906-906a-a5558f2861de-kube-api-access-4nrlt\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.665825 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd4e39bc-5c35-4906-906a-a5558f2861de-combined-ca-bundle\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.665929 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-948qg\" (UniqueName: \"kubernetes.io/projected/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-kube-api-access-948qg\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.666380 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd4e39bc-5c35-4906-906a-a5558f2861de-config-data-custom\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.666678 4760 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.666759 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.669056 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-56f5fc4ddb-8c4vl"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.674273 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-config-data-custom\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.681695 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-combined-ca-bundle\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.683242 4760 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.686763 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c84af16b-fab3-4d5c-bb27-4e04ad255e74" (UID: "c84af16b-fab3-4d5c-bb27-4e04ad255e74"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.692613 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-948qg\" (UniqueName: \"kubernetes.io/projected/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-kube-api-access-948qg\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.694162 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ce07bef-e13b-45e6-ad5e-b7372c3b1432-config-data\") pod \"barbican-worker-bc9c99c9f-fs95m\" (UID: \"4ce07bef-e13b-45e6-ad5e-b7372c3b1432\") " pod="openstack/barbican-worker-bc9c99c9f-fs95m" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.700947 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-56f5fc4ddb-8c4vl"] Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.701106 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.701133 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.703969 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.719503 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768091 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768145 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd4e39bc-5c35-4906-906a-a5558f2861de-config-data\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768176 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-config\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768232 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nrlt\" (UniqueName: \"kubernetes.io/projected/bd4e39bc-5c35-4906-906a-a5558f2861de-kube-api-access-4nrlt\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768256 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd4e39bc-5c35-4906-906a-a5558f2861de-combined-ca-bundle\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768290 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768342 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-svc\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn" Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768358 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn" Nov 24 17:20:39 crc 
kubenswrapper[4760]: I1124 17:20:39.768383 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd4e39bc-5c35-4906-906a-a5558f2861de-config-data-custom\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768407 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r2kb\" (UniqueName: \"kubernetes.io/projected/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-kube-api-access-9r2kb\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768435 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd4e39bc-5c35-4906-906a-a5558f2861de-logs\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.768494 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c84af16b-fab3-4d5c-bb27-4e04ad255e74-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.771893 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd4e39bc-5c35-4906-906a-a5558f2861de-logs\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.776207 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd4e39bc-5c35-4906-906a-a5558f2861de-config-data\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.783786 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd4e39bc-5c35-4906-906a-a5558f2861de-combined-ca-bundle\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.784526 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd4e39bc-5c35-4906-906a-a5558f2861de-config-data-custom\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.794601 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nrlt\" (UniqueName: \"kubernetes.io/projected/bd4e39bc-5c35-4906-906a-a5558f2861de-kube-api-access-4nrlt\") pod \"barbican-keystone-listener-6b6b7c6d54-mkcp9\" (UID: \"bd4e39bc-5c35-4906-906a-a5558f2861de\") " pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.821579 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-bc9c99c9f-fs95m"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.869938 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.870053 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.870105 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7rw7\" (UniqueName: \"kubernetes.io/projected/e0ddccd8-e2cc-4196-a4f4-06139610afd5-kube-api-access-t7rw7\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.870131 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-svc\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.870152 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.870178 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data-custom\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.870230 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r2kb\" (UniqueName: \"kubernetes.io/projected/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-kube-api-access-9r2kb\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.870297 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e0ddccd8-e2cc-4196-a4f4-06139610afd5-logs\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.870327 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-combined-ca-bundle\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.870378 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.870410 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-config\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.871665 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.873896 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.875199 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-config\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.875483 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-svc\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.875566 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.883385 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.894195 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r2kb\" (UniqueName: \"kubernetes.io/projected/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-kube-api-access-9r2kb\") pod \"dnsmasq-dns-85ff748b95-k79jn\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.908485 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5ccbbc7984-m6jkp"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.972087 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7rw7\" (UniqueName: \"kubernetes.io/projected/e0ddccd8-e2cc-4196-a4f4-06139610afd5-kube-api-access-t7rw7\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.972381 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data-custom\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.972503 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e0ddccd8-e2cc-4196-a4f4-06139610afd5-logs\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.972532 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-combined-ca-bundle\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.972590 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.972902 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e0ddccd8-e2cc-4196-a4f4-06139610afd5-logs\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.977900 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-combined-ca-bundle\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.983871 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
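The reconciler entries above walk each volume through a fixed progression: operationExecutor.VerifyControllerAttachedVolume, then "MountVolume started", then "MountVolume.SetUp succeeded". A minimal Go sketch of tracking that progression when reading this log offline (the regexes and the stdin harness are assumptions of this sketch, not kubelet code):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches the escaped volume name in lines like:
//   "operationExecutor.MountVolume started for volume \"config-data\" ..."
var started = regexp.MustCompile(`operationExecutor\.(VerifyControllerAttachedVolume|MountVolume) started for volume \\"([^"\\]+)\\"`)
var mounted = regexp.MustCompile(`MountVolume\.SetUp succeeded for volume \\"([^"\\]+)\\"`)

func main() {
	phase := map[string]string{} // volume name -> last phase seen
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet log lines are long
	for sc.Scan() {
		if m := started.FindStringSubmatch(sc.Text()); m != nil {
			phase[m[2]] = m[1] + " started"
		}
		if m := mounted.FindStringSubmatch(sc.Text()); m != nil {
			phase[m[1]] = "SetUp succeeded"
		}
	}
	for vol, p := range phase { // map order is unspecified
		fmt.Printf("%-25s %s\n", vol, p)
	}
}

Run over the excerpt above, every dnsmasq-dns-85ff748b95-k79jn and barbican-keystone-listener volume ends in the "SetUp succeeded" state.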
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.988618 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:39 crc kubenswrapper[4760]: I1124 17:20:39.989059 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data-custom\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.002454 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7rw7\" (UniqueName: \"kubernetes.io/projected/e0ddccd8-e2cc-4196-a4f4-06139610afd5-kube-api-access-t7rw7\") pod \"barbican-api-56f5fc4ddb-8c4vl\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.040444 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.079885 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5"
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.080859 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-lkmt5" event={"ID":"c84af16b-fab3-4d5c-bb27-4e04ad255e74","Type":"ContainerDied","Data":"d93eac488f3d17072bca7b7871c82c2f674559c93c7e47be31dcf4c11e919d4c"}
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.080892 4760 scope.go:117] "RemoveContainer" containerID="32827c9c51625aac00a4aa0dd129e4a22aa8eb971254c98717bc20267e5fdc7a"
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.169895 4760 scope.go:117] "RemoveContainer" containerID="c170a41a6f262115b60a746628ff265c080282afe870a00c486fa361f52f6a7e"
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.190120 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lkmt5"]
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.224201 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lkmt5"]
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.453247 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8cf89787b-dxmqp"]
Nov 24 17:20:40 crc kubenswrapper[4760]: W1124 17:20:40.500738 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod412c6295_ae70_4706_9c7b_88c4025c9579.slice/crio-352b9ff3e07b8f41bc657fff522791e917cf2e175e6274543d0aa7ccc19e5e55 WatchSource:0}: Error finding container 352b9ff3e07b8f41bc657fff522791e917cf2e175e6274543d0aa7ccc19e5e55: Status 404 returned error can't find the container with id 352b9ff3e07b8f41bc657fff522791e917cf2e175e6274543d0aa7ccc19e5e55
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.597803 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9"]
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.740146 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-bc9c99c9f-fs95m"]
Nov 24 17:20:40 crc kubenswrapper[4760]: W1124 17:20:40.775573 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ce07bef_e13b_45e6_ad5e_b7372c3b1432.slice/crio-42561a3b10043b0f2fb37fbf577a0c975e822681c86e901efe09bfc75a1bcbe0 WatchSource:0}: Error finding container 42561a3b10043b0f2fb37fbf577a0c975e822681c86e901efe09bfc75a1bcbe0: Status 404 returned error can't find the container with id 42561a3b10043b0f2fb37fbf577a0c975e822681c86e901efe09bfc75a1bcbe0
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.800439 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-k79jn"]
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.829222 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5ccbbc7984-m6jkp"]
Nov 24 17:20:40 crc kubenswrapper[4760]: W1124 17:20:40.834110 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1fd753d3_759a_4734_96ac_9c7f1a9138fa.slice/crio-ee91a0b2f8a73cb93b739ce7e5ae9a9fc7e04f65be35019b76855969852da506 WatchSource:0}: Error finding container ee91a0b2f8a73cb93b739ce7e5ae9a9fc7e04f65be35019b76855969852da506: Status 404 returned error can't find the container with id ee91a0b2f8a73cb93b739ce7e5ae9a9fc7e04f65be35019b76855969852da506
Nov 24 17:20:40 crc kubenswrapper[4760]: I1124 17:20:40.983734 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-56f5fc4ddb-8c4vl"]
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.115406 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" event={"ID":"bd4e39bc-5c35-4906-906a-a5558f2861de","Type":"ContainerStarted","Data":"f134d59dfcbee8a110979c8c3f59eec19a5667de756e052a0b4b5f10377bbb97"}
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.121596 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5ccbbc7984-m6jkp" event={"ID":"1fd753d3-759a-4734-96ac-9c7f1a9138fa","Type":"ContainerStarted","Data":"ee91a0b2f8a73cb93b739ce7e5ae9a9fc7e04f65be35019b76855969852da506"}
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.138568 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" event={"ID":"e0ddccd8-e2cc-4196-a4f4-06139610afd5","Type":"ContainerStarted","Data":"e157d3e89d3acf6d8aecdc22240cbed398fd2888484e886574ee1c11d29031ee"}
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.140565 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8cf89787b-dxmqp" event={"ID":"412c6295-ae70-4706-9c7b-88c4025c9579","Type":"ContainerStarted","Data":"6110db8594369ce6636b7203840c4ac5d9e7c6a26a5c2298f1d041dbfb7697bc"}
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.140604 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8cf89787b-dxmqp" event={"ID":"412c6295-ae70-4706-9c7b-88c4025c9579","Type":"ContainerStarted","Data":"352b9ff3e07b8f41bc657fff522791e917cf2e175e6274543d0aa7ccc19e5e55"}
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.143484 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" event={"ID":"dc92a2be-7821-4214-9e5d-2d4c2a1756b8","Type":"ContainerStarted","Data":"c6c8ceae7d797882b754f79053d5f0c1adc98080f7bc39e03ea4f8d7e58cdf54"}
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.147809 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.147832 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.148713 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bc9c99c9f-fs95m" event={"ID":"4ce07bef-e13b-45e6-ad5e-b7372c3b1432","Type":"ContainerStarted","Data":"42561a3b10043b0f2fb37fbf577a0c975e822681c86e901efe09bfc75a1bcbe0"}
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.148760 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.148769 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 24 17:20:41 crc kubenswrapper[4760]: I1124 17:20:41.499145 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c84af16b-fab3-4d5c-bb27-4e04ad255e74" path="/var/lib/kubelet/pods/c84af16b-fab3-4d5c-bb27-4e04ad255e74/volumes"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.159820 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" event={"ID":"e0ddccd8-e2cc-4196-a4f4-06139610afd5","Type":"ContainerStarted","Data":"37fc4cd4a0ed0c2369523c5f340aa64674fe6aaeced5daa18f5be6b27c957a55"}
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.160144 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" event={"ID":"e0ddccd8-e2cc-4196-a4f4-06139610afd5","Type":"ContainerStarted","Data":"0ffe1523cb53f76ec9920b3e1af427958babb7a985c104d214226081568e8203"}
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.160184 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.160205 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-56f5fc4ddb-8c4vl"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.173115 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8cf89787b-dxmqp" event={"ID":"412c6295-ae70-4706-9c7b-88c4025c9579","Type":"ContainerStarted","Data":"35f6f339a037b0015fa0ee245cb0c0bce3c545ded7ab0d5bc93bf5b65e0ebf33"}
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.173893 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-8cf89787b-dxmqp"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.174025 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-8cf89787b-dxmqp"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.196605 4760 generic.go:334] "Generic (PLEG): container finished" podID="dc92a2be-7821-4214-9e5d-2d4c2a1756b8" containerID="166f43c879b19974c5856a4fe763fca6c270f7017cb9ba551badf7c19127071d" exitCode=0
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.196690 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" event={"ID":"dc92a2be-7821-4214-9e5d-2d4c2a1756b8","Type":"ContainerDied","Data":"166f43c879b19974c5856a4fe763fca6c270f7017cb9ba551badf7c19127071d"}
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.210618 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5ccbbc7984-m6jkp" event={"ID":"1fd753d3-759a-4734-96ac-9c7f1a9138fa","Type":"ContainerStarted","Data":"e985e0a33a59fe7c069e7e53f6bdba7ead0a783a948619c0dcec2cf95bfb29f8"}
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.211674 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5ccbbc7984-m6jkp"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.220455 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-8cf89787b-dxmqp" podStartSLOduration=3.220434535 podStartE2EDuration="3.220434535s" podCreationTimestamp="2025-11-24 17:20:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:42.209466181 +0000 UTC m=+1037.532347731" watchObservedRunningTime="2025-11-24 17:20:42.220434535 +0000 UTC m=+1037.543316075"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.222483 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" podStartSLOduration=3.222477444 podStartE2EDuration="3.222477444s" podCreationTimestamp="2025-11-24 17:20:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:42.191708232 +0000 UTC m=+1037.514589782" watchObservedRunningTime="2025-11-24 17:20:42.222477444 +0000 UTC m=+1037.545358984"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.277857 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5ccbbc7984-m6jkp" podStartSLOduration=3.277837759 podStartE2EDuration="3.277837759s" podCreationTimestamp="2025-11-24 17:20:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:42.274859304 +0000 UTC m=+1037.597740854" watchObservedRunningTime="2025-11-24 17:20:42.277837759 +0000 UTC m=+1037.600719319"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.299239 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.299385 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.460190 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.776698 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.777121 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.778030 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.976823 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6b95dd9bc6-5gb75"]
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.978233 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6b95dd9bc6-5gb75"
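The event={...} payloads in the "SyncLoop (PLEG)" lines above happen to be valid JSON, which makes ad-hoc analysis of pod lifecycle events straightforward. A sketch of decoding one (the PLEGEvent type name and field comments are mine, not kubelet's):

package main

import (
	"encoding/json"
	"fmt"
)

type PLEGEvent struct {
	ID   string // pod UID
	Type string // ContainerStarted, ContainerDied, ...
	Data string // container or sandbox ID
}

func main() {
	// Payload copied from the dnsmasq-dns-85ff748b95-k79jn ContainerStarted line above.
	raw := `{"ID":"dc92a2be-7821-4214-9e5d-2d4c2a1756b8","Type":"ContainerStarted","Data":"c6c8ceae7d797882b754f79053d5f0c1adc98080f7bc39e03ea4f8d7e58cdf54"}`
	var ev PLEGEvent
	if err := json.Unmarshal([]byte(raw), &ev); err != nil {
		panic(err)
	}
	fmt.Printf("pod %s: %s %s\n", ev.ID, ev.Type, ev.Data[:12])
}

Note the pairing visible above: a "Generic (PLEG): container finished" line with an exitCode precedes the corresponding ContainerDied event for the same container ID.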
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.986294 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc"
Nov 24 17:20:42 crc kubenswrapper[4760]: I1124 17:20:42.986564 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.000973 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6b95dd9bc6-5gb75"]
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.046411 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-public-tls-certs\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.046463 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-config-data-custom\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.046515 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-logs\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.046547 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf7z8\" (UniqueName: \"kubernetes.io/projected/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-kube-api-access-gf7z8\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.046568 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-combined-ca-bundle\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.046603 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-config-data\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.046629 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-internal-tls-certs\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.148538 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf7z8\" (UniqueName: \"kubernetes.io/projected/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-kube-api-access-gf7z8\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.148586 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-combined-ca-bundle\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.148628 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-config-data\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.148659 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-internal-tls-certs\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.148700 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-public-tls-certs\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.148829 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-config-data-custom\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.148877 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-logs\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.152313 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-logs\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.155855 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-internal-tls-certs\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.155898 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-config-data-custom\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.163557 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-combined-ca-bundle\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.165186 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-config-data\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.165386 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-public-tls-certs\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.171621 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf7z8\" (UniqueName: \"kubernetes.io/projected/0ad3d643-7c73-4b15-966d-d4c7cb1d2438-kube-api-access-gf7z8\") pod \"barbican-api-6b95dd9bc6-5gb75\" (UID: \"0ad3d643-7c73-4b15-966d-d4c7cb1d2438\") " pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.245376 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" event={"ID":"dc92a2be-7821-4214-9e5d-2d4c2a1756b8","Type":"ContainerStarted","Data":"431a955c05fd285060fbe817b155ffb4b9010e21a78b7521af4efab27116c242"}
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.245876 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.272120 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" podStartSLOduration=4.272097353 podStartE2EDuration="4.272097353s" podCreationTimestamp="2025-11-24 17:20:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:43.263448416 +0000 UTC m=+1038.586329966" watchObservedRunningTime="2025-11-24 17:20:43.272097353 +0000 UTC m=+1038.594978903"
Nov 24 17:20:43 crc kubenswrapper[4760]: I1124 17:20:43.340243 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:44 crc kubenswrapper[4760]: I1124 17:20:44.263452 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-gszfs" event={"ID":"182b9849-0723-4fa8-bade-df2f05e6cf37","Type":"ContainerStarted","Data":"35786298e1e5ae845548c51613f1f30be1dabf1c0b860e2999ddf37d6501f6aa"}
Nov 24 17:20:44 crc kubenswrapper[4760]: I1124 17:20:44.283448 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-gszfs" podStartSLOduration=3.286165095 podStartE2EDuration="44.283432886s" podCreationTimestamp="2025-11-24 17:20:00 +0000 UTC" firstStartedPulling="2025-11-24 17:20:01.386348517 +0000 UTC m=+996.709230067" lastFinishedPulling="2025-11-24 17:20:42.383616308 +0000 UTC m=+1037.706497858" observedRunningTime="2025-11-24 17:20:44.282979683 +0000 UTC m=+1039.605861223" watchObservedRunningTime="2025-11-24 17:20:44.283432886 +0000 UTC m=+1039.606314436"
Nov 24 17:20:44 crc kubenswrapper[4760]: I1124 17:20:44.678170 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6b95dd9bc6-5gb75"]
Nov 24 17:20:45 crc kubenswrapper[4760]: I1124 17:20:45.279056 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bc9c99c9f-fs95m" event={"ID":"4ce07bef-e13b-45e6-ad5e-b7372c3b1432","Type":"ContainerStarted","Data":"5f7fcd95a4f5d2861fa9579e33007078ddb346fc90fd4a16b447f6dbc3d97267"}
Nov 24 17:20:45 crc kubenswrapper[4760]: I1124 17:20:45.279303 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bc9c99c9f-fs95m" event={"ID":"4ce07bef-e13b-45e6-ad5e-b7372c3b1432","Type":"ContainerStarted","Data":"62318c149f59d40b201c2751b834327585f8468147025c7ebdf1055c074f0e73"}
Nov 24 17:20:45 crc kubenswrapper[4760]: I1124 17:20:45.282995 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" event={"ID":"bd4e39bc-5c35-4906-906a-a5558f2861de","Type":"ContainerStarted","Data":"15036c9fe805d61bf99cbae42d471ae98ae99d95a7406854034cb2528f41ca5f"}
Nov 24 17:20:45 crc kubenswrapper[4760]: I1124 17:20:45.283035 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" event={"ID":"bd4e39bc-5c35-4906-906a-a5558f2861de","Type":"ContainerStarted","Data":"982d8926efb399c1c6fcef70dbd44dd95a3e6cbfae94201b9d3d98e4ddba5df4"}
Nov 24 17:20:45 crc kubenswrapper[4760]: I1124 17:20:45.319104 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-bc9c99c9f-fs95m" podStartSLOduration=2.857358217 podStartE2EDuration="6.319076785s" podCreationTimestamp="2025-11-24 17:20:39 +0000 UTC" firstStartedPulling="2025-11-24 17:20:40.777619125 +0000 UTC m=+1036.100500675" lastFinishedPulling="2025-11-24 17:20:44.239337693 +0000 UTC m=+1039.562219243" observedRunningTime="2025-11-24 17:20:45.304545649 +0000 UTC m=+1040.627427199" watchObservedRunningTime="2025-11-24 17:20:45.319076785 +0000 UTC m=+1040.641958335"
Nov 24 17:20:45 crc kubenswrapper[4760]: I1124 17:20:45.333154 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6b95dd9bc6-5gb75" event={"ID":"0ad3d643-7c73-4b15-966d-d4c7cb1d2438","Type":"ContainerStarted","Data":"2dc6cdb26801adbd08c7aad6f83b1a4e4cc4ae3770d0cae086396acb03e8a779"}
Nov 24 17:20:45 crc kubenswrapper[4760]: I1124 17:20:45.334664 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6b95dd9bc6-5gb75" event={"ID":"0ad3d643-7c73-4b15-966d-d4c7cb1d2438","Type":"ContainerStarted","Data":"286d7c392f29a190790e6b2d793a3e9dee3bb63cdf335d6d58ed4a1a4566ddf6"}
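The pod_startup_latency_tracker lines here record two durations: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, while podStartSLOduration additionally excludes image-pull time (lastFinishedPulling minus firstStartedPulling); pods whose images needed no pull carry the zero sentinel "0001-01-01 00:00:00 +0000 UTC" for both pull timestamps, so the two durations coincide. A quick check with the cinder-db-sync-gszfs numbers above (the monotonic " m=+..." suffixes are stripped before parsing; the arithmetic is my reading of the fields, not kubelet code):

package main

import (
	"fmt"
	"time"
)

func main() {
	parse := func(s string) time.Time {
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-11-24 17:20:00 +0000 UTC")
	firstPull := parse("2025-11-24 17:20:01.386348517 +0000 UTC")
	lastPull := parse("2025-11-24 17:20:42.383616308 +0000 UTC")
	running := parse("2025-11-24 17:20:44.283432886 +0000 UTC")

	e2e := running.Sub(created)
	slo := e2e - lastPull.Sub(firstPull)
	// Prints 44.283432886s and 3.286165095s, matching podStartE2EDuration
	// and podStartSLOduration in the log line above.
	fmt.Println(e2e, slo)
}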
Nov 24 17:20:45 crc kubenswrapper[4760]: I1124 17:20:45.334832 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6b95dd9bc6-5gb75" event={"ID":"0ad3d643-7c73-4b15-966d-d4c7cb1d2438","Type":"ContainerStarted","Data":"1fb88d332fc9461cd34a876d35bc2adab78f7bae4c0daf1d6227267fef87861e"}
Nov 24 17:20:45 crc kubenswrapper[4760]: I1124 17:20:45.417350 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6b6b7c6d54-mkcp9" podStartSLOduration=2.80926884 podStartE2EDuration="6.417314959s" podCreationTimestamp="2025-11-24 17:20:39 +0000 UTC" firstStartedPulling="2025-11-24 17:20:40.623518861 +0000 UTC m=+1035.946400411" lastFinishedPulling="2025-11-24 17:20:44.23156498 +0000 UTC m=+1039.554446530" observedRunningTime="2025-11-24 17:20:45.381590956 +0000 UTC m=+1040.704472506" watchObservedRunningTime="2025-11-24 17:20:45.417314959 +0000 UTC m=+1040.740196509"
Nov 24 17:20:45 crc kubenswrapper[4760]: I1124 17:20:45.420162 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6b95dd9bc6-5gb75" podStartSLOduration=3.42014821 podStartE2EDuration="3.42014821s" podCreationTimestamp="2025-11-24 17:20:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:45.407859208 +0000 UTC m=+1040.730740758" watchObservedRunningTime="2025-11-24 17:20:45.42014821 +0000 UTC m=+1040.743029760"
Nov 24 17:20:46 crc kubenswrapper[4760]: I1124 17:20:46.357188 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:46 crc kubenswrapper[4760]: I1124 17:20:46.358390 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6b95dd9bc6-5gb75"
Nov 24 17:20:47 crc kubenswrapper[4760]: I1124 17:20:47.351582 4760 generic.go:334] "Generic (PLEG): container finished" podID="182b9849-0723-4fa8-bade-df2f05e6cf37" containerID="35786298e1e5ae845548c51613f1f30be1dabf1c0b860e2999ddf37d6501f6aa" exitCode=0
Nov 24 17:20:47 crc kubenswrapper[4760]: I1124 17:20:47.351664 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-gszfs" event={"ID":"182b9849-0723-4fa8-bade-df2f05e6cf37","Type":"ContainerDied","Data":"35786298e1e5ae845548c51613f1f30be1dabf1c0b860e2999ddf37d6501f6aa"}
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.201714 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-gszfs"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.268085 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-config-data\") pod \"182b9849-0723-4fa8-bade-df2f05e6cf37\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") "
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.268134 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-combined-ca-bundle\") pod \"182b9849-0723-4fa8-bade-df2f05e6cf37\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") "
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.268198 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/182b9849-0723-4fa8-bade-df2f05e6cf37-etc-machine-id\") pod \"182b9849-0723-4fa8-bade-df2f05e6cf37\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") "
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.268271 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-db-sync-config-data\") pod \"182b9849-0723-4fa8-bade-df2f05e6cf37\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") "
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.268372 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-scripts\") pod \"182b9849-0723-4fa8-bade-df2f05e6cf37\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") "
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.268368 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/182b9849-0723-4fa8-bade-df2f05e6cf37-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "182b9849-0723-4fa8-bade-df2f05e6cf37" (UID: "182b9849-0723-4fa8-bade-df2f05e6cf37"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.268482 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8k4kh\" (UniqueName: \"kubernetes.io/projected/182b9849-0723-4fa8-bade-df2f05e6cf37-kube-api-access-8k4kh\") pod \"182b9849-0723-4fa8-bade-df2f05e6cf37\" (UID: \"182b9849-0723-4fa8-bade-df2f05e6cf37\") "
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.268945 4760 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/182b9849-0723-4fa8-bade-df2f05e6cf37-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.274328 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/182b9849-0723-4fa8-bade-df2f05e6cf37-kube-api-access-8k4kh" (OuterVolumeSpecName: "kube-api-access-8k4kh") pod "182b9849-0723-4fa8-bade-df2f05e6cf37" (UID: "182b9849-0723-4fa8-bade-df2f05e6cf37"). InnerVolumeSpecName "kube-api-access-8k4kh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:20:49 crc kubenswrapper[4760]: E1124 17:20:49.280267 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="36365905-cfb1-42e4-8e94-c586e1835c60"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.287213 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "182b9849-0723-4fa8-bade-df2f05e6cf37" (UID: "182b9849-0723-4fa8-bade-df2f05e6cf37"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.287649 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-scripts" (OuterVolumeSpecName: "scripts") pod "182b9849-0723-4fa8-bade-df2f05e6cf37" (UID: "182b9849-0723-4fa8-bade-df2f05e6cf37"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.299407 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "182b9849-0723-4fa8-bade-df2f05e6cf37" (UID: "182b9849-0723-4fa8-bade-df2f05e6cf37"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.327482 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-config-data" (OuterVolumeSpecName: "config-data") pod "182b9849-0723-4fa8-bade-df2f05e6cf37" (UID: "182b9849-0723-4fa8-bade-df2f05e6cf37"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.370508 4760 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.370761 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.370855 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8k4kh\" (UniqueName: \"kubernetes.io/projected/182b9849-0723-4fa8-bade-df2f05e6cf37-kube-api-access-8k4kh\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.370949 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.371759 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182b9849-0723-4fa8-bade-df2f05e6cf37-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.371357 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="ceilometer-notification-agent" containerID="cri-o://5c8b31f23dcf809ae5cf99f6650d661c80ce89f0d065220e8d970e2e3ad1edb6" gracePeriod=30
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.371594 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="proxy-httpd" containerID="cri-o://7e6f564b7b0d257978f915f514f6e8bd0dcfd51e7f699a3661ffda19e9b6c2c9" gracePeriod=30
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.371622 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="sg-core" containerID="cri-o://4ceadab664fbbdc018837f0ec1387ba50226496ced2a45762f73314add03ee9a" gracePeriod=30
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.371272 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"36365905-cfb1-42e4-8e94-c586e1835c60","Type":"ContainerStarted","Data":"7e6f564b7b0d257978f915f514f6e8bd0dcfd51e7f699a3661ffda19e9b6c2c9"}
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.372233 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.378450 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-gszfs" event={"ID":"182b9849-0723-4fa8-bade-df2f05e6cf37","Type":"ContainerDied","Data":"9b0d861e29840b082eb3125438a7885a327ca8618252eb4c5371a8ceac23db67"}
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.378491 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b0d861e29840b082eb3125438a7885a327ca8618252eb4c5371a8ceac23db67"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.378684 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-gszfs"
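"Killing container with a grace period" above means the kubelet delivers SIGTERM first and escalates to SIGKILL only after the grace period lapses (30s for the ceilometer containers here, 10s for the dnsmasq pod deleted below). From inside a container, the contract looks roughly like this sketch (illustrative only, not kubelet or OpenStack code):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	term := make(chan os.Signal, 1)
	signal.Notify(term, syscall.SIGTERM)
	fmt.Println("serving...")
	<-term // the kubelet's first, polite kill
	fmt.Println("SIGTERM received; draining")
	time.Sleep(2 * time.Second) // finish in-flight work well under the grace period
	os.Exit(0)                  // exit before the SIGKILL deadline
}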
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.603101 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8565878c68-g58n7" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.678109 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-bc766455b-9dfnr" podUID="20fc1526-eb8d-424b-b03a-784154b5d7fa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.680800 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 24 17:20:49 crc kubenswrapper[4760]: E1124 17:20:49.681679 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182b9849-0723-4fa8-bade-df2f05e6cf37" containerName="cinder-db-sync"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.681702 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="182b9849-0723-4fa8-bade-df2f05e6cf37" containerName="cinder-db-sync"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.682133 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="182b9849-0723-4fa8-bade-df2f05e6cf37" containerName="cinder-db-sync"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.683613 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.686953 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-tk7rw"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.687228 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.697688 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.698767 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.737058 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.778051 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-k79jn"]
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.778512 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" podUID="dc92a2be-7821-4214-9e5d-2d4c2a1756b8" containerName="dnsmasq-dns" containerID="cri-o://431a955c05fd285060fbe817b155ffb4b9010e21a78b7521af4efab27116c242" gracePeriod=10
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.784451 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b96c1ccc-bc2a-4391-b631-91c28b189a04-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.784636 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-scripts\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.784683 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-85ff748b95-k79jn"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.784780 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.784857 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.784941 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.785073 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfnkd\" (UniqueName: \"kubernetes.io/projected/b96c1ccc-bc2a-4391-b631-91c28b189a04-kube-api-access-gfnkd\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.803137 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-ttg5w"]
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.807397 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.820365 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-ttg5w"]
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.888868 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.888910 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfnkd\" (UniqueName: \"kubernetes.io/projected/b96c1ccc-bc2a-4391-b631-91c28b189a04-kube-api-access-gfnkd\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.888947 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b96c1ccc-bc2a-4391-b631-91c28b189a04-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.888967 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.888993 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9r25\" (UniqueName: \"kubernetes.io/projected/64125b05-f62e-43ea-a1b4-25785686d5e8-kube-api-access-c9r25\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.889039 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.889069 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.889096 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-scripts\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.889138 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-config\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.889152 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.889168 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.889195 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.898020 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-scripts\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.898103 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b96c1ccc-bc2a-4391-b631-91c28b189a04-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.898420 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.899986 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.905199 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.913058 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.914524 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.918279 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.929338 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfnkd\" (UniqueName: \"kubernetes.io/projected/b96c1ccc-bc2a-4391-b631-91c28b189a04-kube-api-access-gfnkd\") pod \"cinder-scheduler-0\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " pod="openstack/cinder-scheduler-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.943118 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.987360 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" podUID="dc92a2be-7821-4214-9e5d-2d4c2a1756b8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.160:5353: connect: connection refused"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992059 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data-custom\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992119 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992147 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9r25\" (UniqueName: \"kubernetes.io/projected/64125b05-f62e-43ea-a1b4-25785686d5e8-kube-api-access-c9r25\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992177 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992203 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jm5r\" (UniqueName: \"kubernetes.io/projected/b140b79a-3f9b-4909-bf34-2be905ddf6b0-kube-api-access-5jm5r\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992216 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b140b79a-3f9b-4909-bf34-2be905ddf6b0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992274 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992312 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992341 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-scripts\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992380 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b140b79a-3f9b-4909-bf34-2be905ddf6b0-logs\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992402 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-config\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992422 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.992443 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.993311 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:49 crc kubenswrapper[4760]: I1124 17:20:49.993365 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:49.997685 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-config\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " 
pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.001457 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.002138 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.015899 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9r25\" (UniqueName: \"kubernetes.io/projected/64125b05-f62e-43ea-a1b4-25785686d5e8-kube-api-access-c9r25\") pod \"dnsmasq-dns-5c9776ccc5-ttg5w\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.026822 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.095959 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.096108 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data-custom\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.096149 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.096192 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jm5r\" (UniqueName: \"kubernetes.io/projected/b140b79a-3f9b-4909-bf34-2be905ddf6b0-kube-api-access-5jm5r\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.096208 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b140b79a-3f9b-4909-bf34-2be905ddf6b0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.096252 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-scripts\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc 
kubenswrapper[4760]: I1124 17:20:50.096291 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b140b79a-3f9b-4909-bf34-2be905ddf6b0-logs\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.096613 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b140b79a-3f9b-4909-bf34-2be905ddf6b0-logs\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.101107 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b140b79a-3f9b-4909-bf34-2be905ddf6b0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.103514 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.107475 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data-custom\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.107687 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.108988 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-scripts\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.139458 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jm5r\" (UniqueName: \"kubernetes.io/projected/b140b79a-3f9b-4909-bf34-2be905ddf6b0-kube-api-access-5jm5r\") pod \"cinder-api-0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.197504 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.265625 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.438863 4760 generic.go:334] "Generic (PLEG): container finished" podID="36365905-cfb1-42e4-8e94-c586e1835c60" containerID="7e6f564b7b0d257978f915f514f6e8bd0dcfd51e7f699a3661ffda19e9b6c2c9" exitCode=0 Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.438897 4760 generic.go:334] "Generic (PLEG): container finished" podID="36365905-cfb1-42e4-8e94-c586e1835c60" containerID="4ceadab664fbbdc018837f0ec1387ba50226496ced2a45762f73314add03ee9a" exitCode=2 Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.438939 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"36365905-cfb1-42e4-8e94-c586e1835c60","Type":"ContainerDied","Data":"7e6f564b7b0d257978f915f514f6e8bd0dcfd51e7f699a3661ffda19e9b6c2c9"} Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.438964 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"36365905-cfb1-42e4-8e94-c586e1835c60","Type":"ContainerDied","Data":"4ceadab664fbbdc018837f0ec1387ba50226496ced2a45762f73314add03ee9a"} Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.440891 4760 generic.go:334] "Generic (PLEG): container finished" podID="dc92a2be-7821-4214-9e5d-2d4c2a1756b8" containerID="431a955c05fd285060fbe817b155ffb4b9010e21a78b7521af4efab27116c242" exitCode=0 Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.440910 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" event={"ID":"dc92a2be-7821-4214-9e5d-2d4c2a1756b8","Type":"ContainerDied","Data":"431a955c05fd285060fbe817b155ffb4b9010e21a78b7521af4efab27116c242"} Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.440923 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" event={"ID":"dc92a2be-7821-4214-9e5d-2d4c2a1756b8","Type":"ContainerDied","Data":"c6c8ceae7d797882b754f79053d5f0c1adc98080f7bc39e03ea4f8d7e58cdf54"} Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.440933 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c6c8ceae7d797882b754f79053d5f0c1adc98080f7bc39e03ea4f8d7e58cdf54" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.463868 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.512409 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-svc\") pod \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.512522 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-nb\") pod \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.512552 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-sb\") pod \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.512636 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r2kb\" (UniqueName: \"kubernetes.io/projected/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-kube-api-access-9r2kb\") pod \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.512715 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-swift-storage-0\") pod \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.512734 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-config\") pod \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\" (UID: \"dc92a2be-7821-4214-9e5d-2d4c2a1756b8\") " Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.540205 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-kube-api-access-9r2kb" (OuterVolumeSpecName: "kube-api-access-9r2kb") pod "dc92a2be-7821-4214-9e5d-2d4c2a1756b8" (UID: "dc92a2be-7821-4214-9e5d-2d4c2a1756b8"). InnerVolumeSpecName "kube-api-access-9r2kb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.593786 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dc92a2be-7821-4214-9e5d-2d4c2a1756b8" (UID: "dc92a2be-7821-4214-9e5d-2d4c2a1756b8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.627864 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dc92a2be-7821-4214-9e5d-2d4c2a1756b8" (UID: "dc92a2be-7821-4214-9e5d-2d4c2a1756b8"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.628525 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.633754 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r2kb\" (UniqueName: \"kubernetes.io/projected/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-kube-api-access-9r2kb\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.633814 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.636569 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dc92a2be-7821-4214-9e5d-2d4c2a1756b8" (UID: "dc92a2be-7821-4214-9e5d-2d4c2a1756b8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.658630 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dc92a2be-7821-4214-9e5d-2d4c2a1756b8" (UID: "dc92a2be-7821-4214-9e5d-2d4c2a1756b8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.663690 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-config" (OuterVolumeSpecName: "config") pod "dc92a2be-7821-4214-9e5d-2d4c2a1756b8" (UID: "dc92a2be-7821-4214-9e5d-2d4c2a1756b8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.735043 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.735078 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.735088 4760 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.735099 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc92a2be-7821-4214-9e5d-2d4c2a1756b8-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:50 crc kubenswrapper[4760]: I1124 17:20:50.870940 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-ttg5w"] Nov 24 17:20:51 crc kubenswrapper[4760]: I1124 17:20:51.060063 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 24 17:20:51 crc kubenswrapper[4760]: I1124 17:20:51.449336 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b96c1ccc-bc2a-4391-b631-91c28b189a04","Type":"ContainerStarted","Data":"6da2d01fdaa8eeb109ed23f05332caeaeb2e6395a6153ee7e5cefaa1bf10ebfa"} Nov 24 17:20:51 crc kubenswrapper[4760]: I1124 17:20:51.450650 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" event={"ID":"64125b05-f62e-43ea-a1b4-25785686d5e8","Type":"ContainerStarted","Data":"7990c9e951f9402d377359232584e487b2e71a6b992ef1c64d1d3156b5989c43"} Nov 24 17:20:51 crc kubenswrapper[4760]: I1124 17:20:51.450700 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-k79jn" Nov 24 17:20:51 crc kubenswrapper[4760]: I1124 17:20:51.494862 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-k79jn"] Nov 24 17:20:51 crc kubenswrapper[4760]: I1124 17:20:51.503583 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-k79jn"] Nov 24 17:20:51 crc kubenswrapper[4760]: I1124 17:20:51.756355 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" Nov 24 17:20:51 crc kubenswrapper[4760]: I1124 17:20:51.884183 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 24 17:20:52 crc kubenswrapper[4760]: I1124 17:20:52.103566 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" Nov 24 17:20:52 crc kubenswrapper[4760]: I1124 17:20:52.482142 4760 generic.go:334] "Generic (PLEG): container finished" podID="64125b05-f62e-43ea-a1b4-25785686d5e8" containerID="ff8ac5ce76b923515cdfa993e8f5d1872d431ca2b4a92053388f6dcb155e1aea" exitCode=0 Nov 24 17:20:52 crc kubenswrapper[4760]: I1124 17:20:52.482433 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" event={"ID":"64125b05-f62e-43ea-a1b4-25785686d5e8","Type":"ContainerDied","Data":"ff8ac5ce76b923515cdfa993e8f5d1872d431ca2b4a92053388f6dcb155e1aea"} Nov 24 17:20:52 crc kubenswrapper[4760]: I1124 17:20:52.489103 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b140b79a-3f9b-4909-bf34-2be905ddf6b0","Type":"ContainerStarted","Data":"b343b99fdaf802ab21170b5252529424e4a2fdcd3307822c3673f571bd51c635"} Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.485862 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc92a2be-7821-4214-9e5d-2d4c2a1756b8" path="/var/lib/kubelet/pods/dc92a2be-7821-4214-9e5d-2d4c2a1756b8/volumes" Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.498247 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b96c1ccc-bc2a-4391-b631-91c28b189a04","Type":"ContainerStarted","Data":"8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070"} Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.514701 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" event={"ID":"64125b05-f62e-43ea-a1b4-25785686d5e8","Type":"ContainerStarted","Data":"0d675610c6c72f67c7f4f0eae825ce80cc26a753b54dff49504ff29734e755fe"} Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.515082 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.518827 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b140b79a-3f9b-4909-bf34-2be905ddf6b0","Type":"ContainerStarted","Data":"53d5c0675b12f6e40e02567d1e8af2b2f59d1554751412369db3400cf4c0622b"} Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.518941 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" containerName="cinder-api-log" containerID="cri-o://cbcaf786ec5a92cec8987c134fcfbb4b4fbab61ed70cd8814f73cbdcad18af07" gracePeriod=30 Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.519036 4760 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b140b79a-3f9b-4909-bf34-2be905ddf6b0","Type":"ContainerStarted","Data":"cbcaf786ec5a92cec8987c134fcfbb4b4fbab61ed70cd8814f73cbdcad18af07"} Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.519141 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.519068 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" containerName="cinder-api" containerID="cri-o://53d5c0675b12f6e40e02567d1e8af2b2f59d1554751412369db3400cf4c0622b" gracePeriod=30 Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.541841 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" podStartSLOduration=4.541821393 podStartE2EDuration="4.541821393s" podCreationTimestamp="2025-11-24 17:20:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:53.536333245 +0000 UTC m=+1048.859214795" watchObservedRunningTime="2025-11-24 17:20:53.541821393 +0000 UTC m=+1048.864702943" Nov 24 17:20:53 crc kubenswrapper[4760]: I1124 17:20:53.566870 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.566852669 podStartE2EDuration="4.566852669s" podCreationTimestamp="2025-11-24 17:20:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:20:53.556879994 +0000 UTC m=+1048.879761544" watchObservedRunningTime="2025-11-24 17:20:53.566852669 +0000 UTC m=+1048.889734219" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.532914 4760 generic.go:334] "Generic (PLEG): container finished" podID="36365905-cfb1-42e4-8e94-c586e1835c60" containerID="5c8b31f23dcf809ae5cf99f6650d661c80ce89f0d065220e8d970e2e3ad1edb6" exitCode=0 Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.533030 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"36365905-cfb1-42e4-8e94-c586e1835c60","Type":"ContainerDied","Data":"5c8b31f23dcf809ae5cf99f6650d661c80ce89f0d065220e8d970e2e3ad1edb6"} Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.533643 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"36365905-cfb1-42e4-8e94-c586e1835c60","Type":"ContainerDied","Data":"4bfc7bd14af24f301fcf430a0cf25a81efc000463d33434053048a4c11459a02"} Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.533672 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4bfc7bd14af24f301fcf430a0cf25a81efc000463d33434053048a4c11459a02" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.535600 4760 generic.go:334] "Generic (PLEG): container finished" podID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" containerID="cbcaf786ec5a92cec8987c134fcfbb4b4fbab61ed70cd8814f73cbdcad18af07" exitCode=143 Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.535668 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b140b79a-3f9b-4909-bf34-2be905ddf6b0","Type":"ContainerDied","Data":"cbcaf786ec5a92cec8987c134fcfbb4b4fbab61ed70cd8814f73cbdcad18af07"} Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.536723 4760 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.537966 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b96c1ccc-bc2a-4391-b631-91c28b189a04","Type":"ContainerStarted","Data":"de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c"} Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.576629 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.1074822619999996 podStartE2EDuration="5.576601876s" podCreationTimestamp="2025-11-24 17:20:49 +0000 UTC" firstStartedPulling="2025-11-24 17:20:50.640191693 +0000 UTC m=+1045.963073233" lastFinishedPulling="2025-11-24 17:20:52.109311287 +0000 UTC m=+1047.432192847" observedRunningTime="2025-11-24 17:20:54.574577308 +0000 UTC m=+1049.897458848" watchObservedRunningTime="2025-11-24 17:20:54.576601876 +0000 UTC m=+1049.899483426" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.617874 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-combined-ca-bundle\") pod \"36365905-cfb1-42e4-8e94-c586e1835c60\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.618036 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpnjs\" (UniqueName: \"kubernetes.io/projected/36365905-cfb1-42e4-8e94-c586e1835c60-kube-api-access-hpnjs\") pod \"36365905-cfb1-42e4-8e94-c586e1835c60\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.618185 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-config-data\") pod \"36365905-cfb1-42e4-8e94-c586e1835c60\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.618216 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-sg-core-conf-yaml\") pod \"36365905-cfb1-42e4-8e94-c586e1835c60\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.618272 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-run-httpd\") pod \"36365905-cfb1-42e4-8e94-c586e1835c60\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.618317 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-scripts\") pod \"36365905-cfb1-42e4-8e94-c586e1835c60\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.618342 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-log-httpd\") pod \"36365905-cfb1-42e4-8e94-c586e1835c60\" (UID: \"36365905-cfb1-42e4-8e94-c586e1835c60\") " Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.618633 
4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "36365905-cfb1-42e4-8e94-c586e1835c60" (UID: "36365905-cfb1-42e4-8e94-c586e1835c60"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.618819 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "36365905-cfb1-42e4-8e94-c586e1835c60" (UID: "36365905-cfb1-42e4-8e94-c586e1835c60"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.619926 4760 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.621497 4760 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/36365905-cfb1-42e4-8e94-c586e1835c60-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.628762 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36365905-cfb1-42e4-8e94-c586e1835c60-kube-api-access-hpnjs" (OuterVolumeSpecName: "kube-api-access-hpnjs") pod "36365905-cfb1-42e4-8e94-c586e1835c60" (UID: "36365905-cfb1-42e4-8e94-c586e1835c60"). InnerVolumeSpecName "kube-api-access-hpnjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.628841 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-scripts" (OuterVolumeSpecName: "scripts") pod "36365905-cfb1-42e4-8e94-c586e1835c60" (UID: "36365905-cfb1-42e4-8e94-c586e1835c60"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.671101 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "36365905-cfb1-42e4-8e94-c586e1835c60" (UID: "36365905-cfb1-42e4-8e94-c586e1835c60"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.720559 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "36365905-cfb1-42e4-8e94-c586e1835c60" (UID: "36365905-cfb1-42e4-8e94-c586e1835c60"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.722089 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-config-data" (OuterVolumeSpecName: "config-data") pod "36365905-cfb1-42e4-8e94-c586e1835c60" (UID: "36365905-cfb1-42e4-8e94-c586e1835c60"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.722552 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.722577 4760 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.722587 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.722595 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36365905-cfb1-42e4-8e94-c586e1835c60-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:54 crc kubenswrapper[4760]: I1124 17:20:54.722606 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpnjs\" (UniqueName: \"kubernetes.io/projected/36365905-cfb1-42e4-8e94-c586e1835c60-kube-api-access-hpnjs\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.027702 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.116682 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6b95dd9bc6-5gb75" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.132520 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6b95dd9bc6-5gb75" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.246871 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-56f5fc4ddb-8c4vl"] Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.247111 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" podUID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" containerName="barbican-api-log" containerID="cri-o://0ffe1523cb53f76ec9920b3e1af427958babb7a985c104d214226081568e8203" gracePeriod=30 Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.247195 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" podUID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" containerName="barbican-api" containerID="cri-o://37fc4cd4a0ed0c2369523c5f340aa64674fe6aaeced5daa18f5be6b27c957a55" gracePeriod=30 Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.552932 4760 generic.go:334] "Generic (PLEG): container finished" podID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" containerID="0ffe1523cb53f76ec9920b3e1af427958babb7a985c104d214226081568e8203" exitCode=143 Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.553080 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.553127 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" event={"ID":"e0ddccd8-e2cc-4196-a4f4-06139610afd5","Type":"ContainerDied","Data":"0ffe1523cb53f76ec9920b3e1af427958babb7a985c104d214226081568e8203"} Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.607996 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.627552 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.646509 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:20:55 crc kubenswrapper[4760]: E1124 17:20:55.646933 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="ceilometer-notification-agent" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.646954 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="ceilometer-notification-agent" Nov 24 17:20:55 crc kubenswrapper[4760]: E1124 17:20:55.646983 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="proxy-httpd" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.646993 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="proxy-httpd" Nov 24 17:20:55 crc kubenswrapper[4760]: E1124 17:20:55.647035 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc92a2be-7821-4214-9e5d-2d4c2a1756b8" containerName="dnsmasq-dns" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.647044 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc92a2be-7821-4214-9e5d-2d4c2a1756b8" containerName="dnsmasq-dns" Nov 24 17:20:55 crc kubenswrapper[4760]: E1124 17:20:55.647058 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc92a2be-7821-4214-9e5d-2d4c2a1756b8" containerName="init" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.647067 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc92a2be-7821-4214-9e5d-2d4c2a1756b8" containerName="init" Nov 24 17:20:55 crc kubenswrapper[4760]: E1124 17:20:55.647079 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="sg-core" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.647087 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="sg-core" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.647302 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="ceilometer-notification-agent" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.647326 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="proxy-httpd" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.647336 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" containerName="sg-core" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.647360 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc92a2be-7821-4214-9e5d-2d4c2a1756b8" 
containerName="dnsmasq-dns" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.650550 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.653484 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.653795 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.655555 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.747173 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-scripts\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.747237 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.747261 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-run-httpd\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.747284 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-log-httpd\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.747302 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-config-data\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.747338 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvr5c\" (UniqueName: \"kubernetes.io/projected/3003350b-62f1-4eb7-b044-bc0e8b007ef5-kube-api-access-bvr5c\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.747372 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.849411 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-scripts\") pod \"ceilometer-0\" (UID: 
\"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.849953 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.850059 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-run-httpd\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.850151 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-log-httpd\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.850222 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-config-data\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.850316 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvr5c\" (UniqueName: \"kubernetes.io/projected/3003350b-62f1-4eb7-b044-bc0e8b007ef5-kube-api-access-bvr5c\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.850415 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.853596 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-log-httpd\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.853688 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-run-httpd\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.857979 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.859170 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.865439 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-config-data\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.866351 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-scripts\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.870342 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvr5c\" (UniqueName: \"kubernetes.io/projected/3003350b-62f1-4eb7-b044-bc0e8b007ef5-kube-api-access-bvr5c\") pod \"ceilometer-0\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " pod="openstack/ceilometer-0" Nov 24 17:20:55 crc kubenswrapper[4760]: I1124 17:20:55.967871 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:20:56 crc kubenswrapper[4760]: I1124 17:20:56.502728 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:20:56 crc kubenswrapper[4760]: I1124 17:20:56.576713 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3003350b-62f1-4eb7-b044-bc0e8b007ef5","Type":"ContainerStarted","Data":"666dc2e220ec0a61b83a06147caa4a2740f97d525ea83900c67e50e92ab203e0"} Nov 24 17:20:57 crc kubenswrapper[4760]: I1124 17:20:57.486291 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36365905-cfb1-42e4-8e94-c586e1835c60" path="/var/lib/kubelet/pods/36365905-cfb1-42e4-8e94-c586e1835c60/volumes" Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.606862 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3003350b-62f1-4eb7-b044-bc0e8b007ef5","Type":"ContainerStarted","Data":"15705ae3ab23e4dd7695478548e5160dad8e1d7e69944bddd9422aa1aa8053f3"} Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.609272 4760 generic.go:334] "Generic (PLEG): container finished" podID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" containerID="37fc4cd4a0ed0c2369523c5f340aa64674fe6aaeced5daa18f5be6b27c957a55" exitCode=0 Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.609311 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" event={"ID":"e0ddccd8-e2cc-4196-a4f4-06139610afd5","Type":"ContainerDied","Data":"37fc4cd4a0ed0c2369523c5f340aa64674fe6aaeced5daa18f5be6b27c957a55"} Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.759573 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.817238 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.914622 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-combined-ca-bundle\") pod \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.915085 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data\") pod \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.915129 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e0ddccd8-e2cc-4196-a4f4-06139610afd5-logs\") pod \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.915364 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7rw7\" (UniqueName: \"kubernetes.io/projected/e0ddccd8-e2cc-4196-a4f4-06139610afd5-kube-api-access-t7rw7\") pod \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.915436 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data-custom\") pod \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\" (UID: \"e0ddccd8-e2cc-4196-a4f4-06139610afd5\") " Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.915745 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0ddccd8-e2cc-4196-a4f4-06139610afd5-logs" (OuterVolumeSpecName: "logs") pod "e0ddccd8-e2cc-4196-a4f4-06139610afd5" (UID: "e0ddccd8-e2cc-4196-a4f4-06139610afd5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.916685 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e0ddccd8-e2cc-4196-a4f4-06139610afd5-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.920546 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e0ddccd8-e2cc-4196-a4f4-06139610afd5" (UID: "e0ddccd8-e2cc-4196-a4f4-06139610afd5"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.925155 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0ddccd8-e2cc-4196-a4f4-06139610afd5-kube-api-access-t7rw7" (OuterVolumeSpecName: "kube-api-access-t7rw7") pod "e0ddccd8-e2cc-4196-a4f4-06139610afd5" (UID: "e0ddccd8-e2cc-4196-a4f4-06139610afd5"). InnerVolumeSpecName "kube-api-access-t7rw7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:20:58 crc kubenswrapper[4760]: I1124 17:20:58.972189 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e0ddccd8-e2cc-4196-a4f4-06139610afd5" (UID: "e0ddccd8-e2cc-4196-a4f4-06139610afd5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.009615 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data" (OuterVolumeSpecName: "config-data") pod "e0ddccd8-e2cc-4196-a4f4-06139610afd5" (UID: "e0ddccd8-e2cc-4196-a4f4-06139610afd5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.018849 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.019017 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7rw7\" (UniqueName: \"kubernetes.io/projected/e0ddccd8-e2cc-4196-a4f4-06139610afd5-kube-api-access-t7rw7\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.019091 4760 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.019171 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0ddccd8-e2cc-4196-a4f4-06139610afd5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.632474 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" event={"ID":"e0ddccd8-e2cc-4196-a4f4-06139610afd5","Type":"ContainerDied","Data":"e157d3e89d3acf6d8aecdc22240cbed398fd2888484e886574ee1c11d29031ee"} Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.632780 4760 scope.go:117] "RemoveContainer" containerID="37fc4cd4a0ed0c2369523c5f340aa64674fe6aaeced5daa18f5be6b27c957a55" Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.632621 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-56f5fc4ddb-8c4vl" Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.645632 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3003350b-62f1-4eb7-b044-bc0e8b007ef5","Type":"ContainerStarted","Data":"953ea00a6e939e422fc60e0b7c483bcc6b7cef20418195ffc2e11d3424b3e78b"} Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.698517 4760 scope.go:117] "RemoveContainer" containerID="0ffe1523cb53f76ec9920b3e1af427958babb7a985c104d214226081568e8203" Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.707592 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-56f5fc4ddb-8c4vl"] Nov 24 17:20:59 crc kubenswrapper[4760]: I1124 17:20:59.717699 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-56f5fc4ddb-8c4vl"] Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.200277 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.248608 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.299115 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-q7xqh"] Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.299411 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" podUID="43208e79-4554-46bc-a9b1-db65113acaed" containerName="dnsmasq-dns" containerID="cri-o://e201d9ee7db36b81f4ff32d49bdeed8db2c9dc9d600fb5c4e539c9eb1df82c8b" gracePeriod=10 Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.375413 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.662163 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3003350b-62f1-4eb7-b044-bc0e8b007ef5","Type":"ContainerStarted","Data":"eb0884a02c8d5ba8a75f6e4ace5a0761f8a08a00468ae0b7c52c6fc687f6845f"} Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.665916 4760 generic.go:334] "Generic (PLEG): container finished" podID="43208e79-4554-46bc-a9b1-db65113acaed" containerID="e201d9ee7db36b81f4ff32d49bdeed8db2c9dc9d600fb5c4e539c9eb1df82c8b" exitCode=0 Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.666191 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b96c1ccc-bc2a-4391-b631-91c28b189a04" containerName="cinder-scheduler" containerID="cri-o://8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070" gracePeriod=30 Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.666554 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" event={"ID":"43208e79-4554-46bc-a9b1-db65113acaed","Type":"ContainerDied","Data":"e201d9ee7db36b81f4ff32d49bdeed8db2c9dc9d600fb5c4e539c9eb1df82c8b"} Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.666898 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b96c1ccc-bc2a-4391-b631-91c28b189a04" containerName="probe" containerID="cri-o://de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c" gracePeriod=30 Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.836475 4760 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.980768 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-nb\") pod \"43208e79-4554-46bc-a9b1-db65113acaed\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.981129 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-sb\") pod \"43208e79-4554-46bc-a9b1-db65113acaed\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.981165 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-svc\") pod \"43208e79-4554-46bc-a9b1-db65113acaed\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.981215 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-config\") pod \"43208e79-4554-46bc-a9b1-db65113acaed\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.981269 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9gwz\" (UniqueName: \"kubernetes.io/projected/43208e79-4554-46bc-a9b1-db65113acaed-kube-api-access-r9gwz\") pod \"43208e79-4554-46bc-a9b1-db65113acaed\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.981315 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-swift-storage-0\") pod \"43208e79-4554-46bc-a9b1-db65113acaed\" (UID: \"43208e79-4554-46bc-a9b1-db65113acaed\") " Nov 24 17:21:00 crc kubenswrapper[4760]: I1124 17:21:00.993122 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43208e79-4554-46bc-a9b1-db65113acaed-kube-api-access-r9gwz" (OuterVolumeSpecName: "kube-api-access-r9gwz") pod "43208e79-4554-46bc-a9b1-db65113acaed" (UID: "43208e79-4554-46bc-a9b1-db65113acaed"). InnerVolumeSpecName "kube-api-access-r9gwz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.057749 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-config" (OuterVolumeSpecName: "config") pod "43208e79-4554-46bc-a9b1-db65113acaed" (UID: "43208e79-4554-46bc-a9b1-db65113acaed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.083054 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "43208e79-4554-46bc-a9b1-db65113acaed" (UID: "43208e79-4554-46bc-a9b1-db65113acaed"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.087442 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "43208e79-4554-46bc-a9b1-db65113acaed" (UID: "43208e79-4554-46bc-a9b1-db65113acaed"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.088508 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.088525 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.088535 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.088543 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9gwz\" (UniqueName: \"kubernetes.io/projected/43208e79-4554-46bc-a9b1-db65113acaed-kube-api-access-r9gwz\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.089362 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "43208e79-4554-46bc-a9b1-db65113acaed" (UID: "43208e79-4554-46bc-a9b1-db65113acaed"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.094607 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "43208e79-4554-46bc-a9b1-db65113acaed" (UID: "43208e79-4554-46bc-a9b1-db65113acaed"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.190060 4760 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.190106 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43208e79-4554-46bc-a9b1-db65113acaed-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.325771 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7b5b8bc889-kqfhp" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.394523 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-ff79c6b68-gb844"] Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.394951 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-ff79c6b68-gb844" podUID="b7d09082-9811-494f-b9b7-146154ffb7d5" containerName="neutron-httpd" containerID="cri-o://dc07ac130418e8e6f6aedcb18da14ecf07b1ab15594caacc36b5427c0004dc3e" gracePeriod=30 Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.394840 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-ff79c6b68-gb844" podUID="b7d09082-9811-494f-b9b7-146154ffb7d5" containerName="neutron-api" containerID="cri-o://482feb73186f30a1eb0e691c9d8d8b842cf9e9c8533a6f1938e17a9996dbc8d4" gracePeriod=30 Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.478079 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" path="/var/lib/kubelet/pods/e0ddccd8-e2cc-4196-a4f4-06139610afd5/volumes" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.573942 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.674593 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" event={"ID":"43208e79-4554-46bc-a9b1-db65113acaed","Type":"ContainerDied","Data":"d3a82e71e45ec9f0e8f47d7f25eb1b7647c096b2ec7bcce329772b7f484792e4"} Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.674633 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-q7xqh" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.674643 4760 scope.go:117] "RemoveContainer" containerID="e201d9ee7db36b81f4ff32d49bdeed8db2c9dc9d600fb5c4e539c9eb1df82c8b" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.680327 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3003350b-62f1-4eb7-b044-bc0e8b007ef5","Type":"ContainerStarted","Data":"e217bb50d8aad434daf01b74a3664b26ad12589dff342fe1122a9d8e73624523"} Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.681186 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.685430 4760 generic.go:334] "Generic (PLEG): container finished" podID="b96c1ccc-bc2a-4391-b631-91c28b189a04" containerID="de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c" exitCode=0 Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.685469 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b96c1ccc-bc2a-4391-b631-91c28b189a04","Type":"ContainerDied","Data":"de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c"} Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.695999 4760 generic.go:334] "Generic (PLEG): container finished" podID="b7d09082-9811-494f-b9b7-146154ffb7d5" containerID="dc07ac130418e8e6f6aedcb18da14ecf07b1ab15594caacc36b5427c0004dc3e" exitCode=0 Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.696105 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ff79c6b68-gb844" event={"ID":"b7d09082-9811-494f-b9b7-146154ffb7d5","Type":"ContainerDied","Data":"dc07ac130418e8e6f6aedcb18da14ecf07b1ab15594caacc36b5427c0004dc3e"} Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.708515 4760 scope.go:117] "RemoveContainer" containerID="48b33e4f25e1e93ab294fca0d0dec3da158f50a4b88ba757bbbe675ee871ce7e" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.708677 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-q7xqh"] Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.725440 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-q7xqh"] Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.726612 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.36738747 podStartE2EDuration="6.726596222s" podCreationTimestamp="2025-11-24 17:20:55 +0000 UTC" firstStartedPulling="2025-11-24 17:20:56.495379678 +0000 UTC m=+1051.818261238" lastFinishedPulling="2025-11-24 17:21:00.85458844 +0000 UTC m=+1056.177469990" observedRunningTime="2025-11-24 17:21:01.712168369 +0000 UTC m=+1057.035049919" watchObservedRunningTime="2025-11-24 17:21:01.726596222 +0000 UTC m=+1057.049477772" Nov 24 17:21:01 crc kubenswrapper[4760]: I1124 17:21:01.746636 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:21:02 crc kubenswrapper[4760]: I1124 17:21:02.272966 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 24 17:21:03 crc kubenswrapper[4760]: I1124 17:21:03.221709 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:21:03 crc kubenswrapper[4760]: I1124 17:21:03.426499 4760 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-bc766455b-9dfnr" Nov 24 17:21:03 crc kubenswrapper[4760]: I1124 17:21:03.489546 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43208e79-4554-46bc-a9b1-db65113acaed" path="/var/lib/kubelet/pods/43208e79-4554-46bc-a9b1-db65113acaed/volumes" Nov 24 17:21:03 crc kubenswrapper[4760]: I1124 17:21:03.504333 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8565878c68-g58n7"] Nov 24 17:21:03 crc kubenswrapper[4760]: I1124 17:21:03.712824 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8565878c68-g58n7" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon-log" containerID="cri-o://00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d" gracePeriod=30 Nov 24 17:21:03 crc kubenswrapper[4760]: I1124 17:21:03.713612 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8565878c68-g58n7" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon" containerID="cri-o://88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4" gracePeriod=30 Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.463101 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.558872 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b96c1ccc-bc2a-4391-b631-91c28b189a04-etc-machine-id\") pod \"b96c1ccc-bc2a-4391-b631-91c28b189a04\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.558966 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data-custom\") pod \"b96c1ccc-bc2a-4391-b631-91c28b189a04\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.558994 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b96c1ccc-bc2a-4391-b631-91c28b189a04-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b96c1ccc-bc2a-4391-b631-91c28b189a04" (UID: "b96c1ccc-bc2a-4391-b631-91c28b189a04"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.559043 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-scripts\") pod \"b96c1ccc-bc2a-4391-b631-91c28b189a04\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.559080 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-combined-ca-bundle\") pod \"b96c1ccc-bc2a-4391-b631-91c28b189a04\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.559099 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfnkd\" (UniqueName: \"kubernetes.io/projected/b96c1ccc-bc2a-4391-b631-91c28b189a04-kube-api-access-gfnkd\") pod \"b96c1ccc-bc2a-4391-b631-91c28b189a04\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.559350 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data\") pod \"b96c1ccc-bc2a-4391-b631-91c28b189a04\" (UID: \"b96c1ccc-bc2a-4391-b631-91c28b189a04\") " Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.559708 4760 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b96c1ccc-bc2a-4391-b631-91c28b189a04-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.564900 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b96c1ccc-bc2a-4391-b631-91c28b189a04-kube-api-access-gfnkd" (OuterVolumeSpecName: "kube-api-access-gfnkd") pod "b96c1ccc-bc2a-4391-b631-91c28b189a04" (UID: "b96c1ccc-bc2a-4391-b631-91c28b189a04"). InnerVolumeSpecName "kube-api-access-gfnkd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.565022 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b96c1ccc-bc2a-4391-b631-91c28b189a04" (UID: "b96c1ccc-bc2a-4391-b631-91c28b189a04"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.582981 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-scripts" (OuterVolumeSpecName: "scripts") pod "b96c1ccc-bc2a-4391-b631-91c28b189a04" (UID: "b96c1ccc-bc2a-4391-b631-91c28b189a04"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.621091 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b96c1ccc-bc2a-4391-b631-91c28b189a04" (UID: "b96c1ccc-bc2a-4391-b631-91c28b189a04"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.661670 4760 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.661704 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.661718 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.661731 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfnkd\" (UniqueName: \"kubernetes.io/projected/b96c1ccc-bc2a-4391-b631-91c28b189a04-kube-api-access-gfnkd\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.684989 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data" (OuterVolumeSpecName: "config-data") pod "b96c1ccc-bc2a-4391-b631-91c28b189a04" (UID: "b96c1ccc-bc2a-4391-b631-91c28b189a04"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.723794 4760 generic.go:334] "Generic (PLEG): container finished" podID="b96c1ccc-bc2a-4391-b631-91c28b189a04" containerID="8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070" exitCode=0 Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.723853 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b96c1ccc-bc2a-4391-b631-91c28b189a04","Type":"ContainerDied","Data":"8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070"} Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.723878 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b96c1ccc-bc2a-4391-b631-91c28b189a04","Type":"ContainerDied","Data":"6da2d01fdaa8eeb109ed23f05332caeaeb2e6395a6153ee7e5cefaa1bf10ebfa"} Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.724029 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.724026 4760 scope.go:117] "RemoveContainer" containerID="de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.745576 4760 scope.go:117] "RemoveContainer" containerID="8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.763295 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b96c1ccc-bc2a-4391-b631-91c28b189a04-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.769960 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.778352 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.793345 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794102 4760 scope.go:117] "RemoveContainer" containerID="de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c" Nov 24 17:21:04 crc kubenswrapper[4760]: E1124 17:21:04.794164 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b96c1ccc-bc2a-4391-b631-91c28b189a04" containerName="probe" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794180 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b96c1ccc-bc2a-4391-b631-91c28b189a04" containerName="probe" Nov 24 17:21:04 crc kubenswrapper[4760]: E1124 17:21:04.794193 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43208e79-4554-46bc-a9b1-db65113acaed" containerName="dnsmasq-dns" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794200 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="43208e79-4554-46bc-a9b1-db65113acaed" containerName="dnsmasq-dns" Nov 24 17:21:04 crc kubenswrapper[4760]: E1124 17:21:04.794216 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43208e79-4554-46bc-a9b1-db65113acaed" containerName="init" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794222 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="43208e79-4554-46bc-a9b1-db65113acaed" containerName="init" Nov 24 17:21:04 crc kubenswrapper[4760]: E1124 17:21:04.794230 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" containerName="barbican-api" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794236 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" containerName="barbican-api" Nov 24 17:21:04 crc kubenswrapper[4760]: E1124 17:21:04.794256 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b96c1ccc-bc2a-4391-b631-91c28b189a04" containerName="cinder-scheduler" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794261 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b96c1ccc-bc2a-4391-b631-91c28b189a04" containerName="cinder-scheduler" Nov 24 17:21:04 crc kubenswrapper[4760]: E1124 17:21:04.794284 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" containerName="barbican-api-log" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794291 4760 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" containerName="barbican-api-log" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794525 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="b96c1ccc-bc2a-4391-b631-91c28b189a04" containerName="probe" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794552 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" containerName="barbican-api-log" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794568 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="b96c1ccc-bc2a-4391-b631-91c28b189a04" containerName="cinder-scheduler" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794582 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="43208e79-4554-46bc-a9b1-db65113acaed" containerName="dnsmasq-dns" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.794596 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0ddccd8-e2cc-4196-a4f4-06139610afd5" containerName="barbican-api" Nov 24 17:21:04 crc kubenswrapper[4760]: E1124 17:21:04.795477 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c\": container with ID starting with de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c not found: ID does not exist" containerID="de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.795529 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c"} err="failed to get container status \"de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c\": rpc error: code = NotFound desc = could not find container \"de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c\": container with ID starting with de274f5c933516527573c56db5f8d93459235d46573f1d14368a1ab41f8c497c not found: ID does not exist" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.795557 4760 scope.go:117] "RemoveContainer" containerID="8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.795745 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: E1124 17:21:04.795796 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070\": container with ID starting with 8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070 not found: ID does not exist" containerID="8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.795827 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070"} err="failed to get container status \"8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070\": rpc error: code = NotFound desc = could not find container \"8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070\": container with ID starting with 8e635837612f82ae0d063c89a160543216718dc0b1c18d8c7059083afa66d070 not found: ID does not exist" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.797801 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.809961 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.864942 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.864993 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-scripts\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.865117 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.865248 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4faf4c98-3f75-4b32-b35d-99e020a71f8c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.865294 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-config-data\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.865328 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-twrmb\" (UniqueName: \"kubernetes.io/projected/4faf4c98-3f75-4b32-b35d-99e020a71f8c-kube-api-access-twrmb\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.967281 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4faf4c98-3f75-4b32-b35d-99e020a71f8c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.967335 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-config-data\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.967375 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twrmb\" (UniqueName: \"kubernetes.io/projected/4faf4c98-3f75-4b32-b35d-99e020a71f8c-kube-api-access-twrmb\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.967414 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.967433 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-scripts\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.967461 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.967936 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4faf4c98-3f75-4b32-b35d-99e020a71f8c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.971179 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.971404 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-config-data\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc 
kubenswrapper[4760]: I1124 17:21:04.971952 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.973652 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4faf4c98-3f75-4b32-b35d-99e020a71f8c-scripts\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:04 crc kubenswrapper[4760]: I1124 17:21:04.998122 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twrmb\" (UniqueName: \"kubernetes.io/projected/4faf4c98-3f75-4b32-b35d-99e020a71f8c-kube-api-access-twrmb\") pod \"cinder-scheduler-0\" (UID: \"4faf4c98-3f75-4b32-b35d-99e020a71f8c\") " pod="openstack/cinder-scheduler-0" Nov 24 17:21:05 crc kubenswrapper[4760]: I1124 17:21:05.127972 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 17:21:05 crc kubenswrapper[4760]: I1124 17:21:05.477892 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b96c1ccc-bc2a-4391-b631-91c28b189a04" path="/var/lib/kubelet/pods/b96c1ccc-bc2a-4391-b631-91c28b189a04/volumes" Nov 24 17:21:05 crc kubenswrapper[4760]: I1124 17:21:05.601184 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 17:21:05 crc kubenswrapper[4760]: W1124 17:21:05.605490 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4faf4c98_3f75_4b32_b35d_99e020a71f8c.slice/crio-3cd7f85359efc188ec1600e8e7d563da9939c1a85aa6773e78ecbba5beb9ce52 WatchSource:0}: Error finding container 3cd7f85359efc188ec1600e8e7d563da9939c1a85aa6773e78ecbba5beb9ce52: Status 404 returned error can't find the container with id 3cd7f85359efc188ec1600e8e7d563da9939c1a85aa6773e78ecbba5beb9ce52 Nov 24 17:21:05 crc kubenswrapper[4760]: I1124 17:21:05.743396 4760 generic.go:334] "Generic (PLEG): container finished" podID="b7d09082-9811-494f-b9b7-146154ffb7d5" containerID="482feb73186f30a1eb0e691c9d8d8b842cf9e9c8533a6f1938e17a9996dbc8d4" exitCode=0 Nov 24 17:21:05 crc kubenswrapper[4760]: I1124 17:21:05.743475 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ff79c6b68-gb844" event={"ID":"b7d09082-9811-494f-b9b7-146154ffb7d5","Type":"ContainerDied","Data":"482feb73186f30a1eb0e691c9d8d8b842cf9e9c8533a6f1938e17a9996dbc8d4"} Nov 24 17:21:05 crc kubenswrapper[4760]: I1124 17:21:05.749113 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4faf4c98-3f75-4b32-b35d-99e020a71f8c","Type":"ContainerStarted","Data":"3cd7f85359efc188ec1600e8e7d563da9939c1a85aa6773e78ecbba5beb9ce52"} Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.195620 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.307295 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-httpd-config\") pod \"b7d09082-9811-494f-b9b7-146154ffb7d5\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.307492 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-ovndb-tls-certs\") pod \"b7d09082-9811-494f-b9b7-146154ffb7d5\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.307565 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkl7k\" (UniqueName: \"kubernetes.io/projected/b7d09082-9811-494f-b9b7-146154ffb7d5-kube-api-access-gkl7k\") pod \"b7d09082-9811-494f-b9b7-146154ffb7d5\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.307643 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-config\") pod \"b7d09082-9811-494f-b9b7-146154ffb7d5\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.307705 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-combined-ca-bundle\") pod \"b7d09082-9811-494f-b9b7-146154ffb7d5\" (UID: \"b7d09082-9811-494f-b9b7-146154ffb7d5\") " Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.310914 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "b7d09082-9811-494f-b9b7-146154ffb7d5" (UID: "b7d09082-9811-494f-b9b7-146154ffb7d5"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.313892 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7d09082-9811-494f-b9b7-146154ffb7d5-kube-api-access-gkl7k" (OuterVolumeSpecName: "kube-api-access-gkl7k") pod "b7d09082-9811-494f-b9b7-146154ffb7d5" (UID: "b7d09082-9811-494f-b9b7-146154ffb7d5"). InnerVolumeSpecName "kube-api-access-gkl7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.375148 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-config" (OuterVolumeSpecName: "config") pod "b7d09082-9811-494f-b9b7-146154ffb7d5" (UID: "b7d09082-9811-494f-b9b7-146154ffb7d5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.386325 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b7d09082-9811-494f-b9b7-146154ffb7d5" (UID: "b7d09082-9811-494f-b9b7-146154ffb7d5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.407045 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "b7d09082-9811-494f-b9b7-146154ffb7d5" (UID: "b7d09082-9811-494f-b9b7-146154ffb7d5"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.410121 4760 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.410153 4760 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.410166 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkl7k\" (UniqueName: \"kubernetes.io/projected/b7d09082-9811-494f-b9b7-146154ffb7d5-kube-api-access-gkl7k\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.410176 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.410184 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7d09082-9811-494f-b9b7-146154ffb7d5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.765929 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4faf4c98-3f75-4b32-b35d-99e020a71f8c","Type":"ContainerStarted","Data":"3070a1755a3287cdd7933f8f1f5fbec9276348c8257da10225929264c3b2725e"} Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.769781 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ff79c6b68-gb844" event={"ID":"b7d09082-9811-494f-b9b7-146154ffb7d5","Type":"ContainerDied","Data":"47ec7b42a839442e2af4f32e96d041f2a9030c6920320a2cac3155cbe3340f4d"} Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.769825 4760 scope.go:117] "RemoveContainer" containerID="dc07ac130418e8e6f6aedcb18da14ecf07b1ab15594caacc36b5427c0004dc3e" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.770132 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-ff79c6b68-gb844" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.791247 4760 scope.go:117] "RemoveContainer" containerID="482feb73186f30a1eb0e691c9d8d8b842cf9e9c8533a6f1938e17a9996dbc8d4" Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.809154 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-ff79c6b68-gb844"] Nov 24 17:21:06 crc kubenswrapper[4760]: I1124 17:21:06.818707 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-ff79c6b68-gb844"] Nov 24 17:21:07 crc kubenswrapper[4760]: I1124 17:21:07.478681 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7d09082-9811-494f-b9b7-146154ffb7d5" path="/var/lib/kubelet/pods/b7d09082-9811-494f-b9b7-146154ffb7d5/volumes" Nov 24 17:21:07 crc kubenswrapper[4760]: I1124 17:21:07.788613 4760 generic.go:334] "Generic (PLEG): container finished" podID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerID="88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4" exitCode=0 Nov 24 17:21:07 crc kubenswrapper[4760]: I1124 17:21:07.788685 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8565878c68-g58n7" event={"ID":"b2a0d3e8-0ad1-4397-abb3-0b0074b13103","Type":"ContainerDied","Data":"88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4"} Nov 24 17:21:07 crc kubenswrapper[4760]: I1124 17:21:07.802400 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4faf4c98-3f75-4b32-b35d-99e020a71f8c","Type":"ContainerStarted","Data":"c14238ff97fa3afbc761700b9183fe192a0b912d5c196bd52e3cd9628a8a62d4"} Nov 24 17:21:07 crc kubenswrapper[4760]: I1124 17:21:07.840550 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.840529776 podStartE2EDuration="3.840529776s" podCreationTimestamp="2025-11-24 17:21:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:21:07.824898129 +0000 UTC m=+1063.147779679" watchObservedRunningTime="2025-11-24 17:21:07.840529776 +0000 UTC m=+1063.163411326" Nov 24 17:21:09 crc kubenswrapper[4760]: I1124 17:21:09.597409 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8565878c68-g58n7" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 24 17:21:10 crc kubenswrapper[4760]: I1124 17:21:10.128382 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 24 17:21:10 crc kubenswrapper[4760]: I1124 17:21:10.797505 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:21:10 crc kubenswrapper[4760]: I1124 17:21:10.872089 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-8cf89787b-dxmqp" Nov 24 17:21:11 crc kubenswrapper[4760]: I1124 17:21:11.676680 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5ccbbc7984-m6jkp" Nov 24 17:21:12 crc kubenswrapper[4760]: I1124 17:21:12.999206 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 24 17:21:13 crc kubenswrapper[4760]: E1124 17:21:13.001283 
4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7d09082-9811-494f-b9b7-146154ffb7d5" containerName="neutron-api" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.001438 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7d09082-9811-494f-b9b7-146154ffb7d5" containerName="neutron-api" Nov 24 17:21:13 crc kubenswrapper[4760]: E1124 17:21:13.001545 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7d09082-9811-494f-b9b7-146154ffb7d5" containerName="neutron-httpd" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.001646 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7d09082-9811-494f-b9b7-146154ffb7d5" containerName="neutron-httpd" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.002020 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7d09082-9811-494f-b9b7-146154ffb7d5" containerName="neutron-httpd" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.002161 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7d09082-9811-494f-b9b7-146154ffb7d5" containerName="neutron-api" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.002939 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.005642 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-jb9xx" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.010156 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.010365 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.026336 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.140423 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/dcc1106b-ca31-4432-948b-f01f5f47c370-openstack-config\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.140477 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcc1106b-ca31-4432-948b-f01f5f47c370-combined-ca-bundle\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.140541 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/dcc1106b-ca31-4432-948b-f01f5f47c370-openstack-config-secret\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.140574 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vljnm\" (UniqueName: \"kubernetes.io/projected/dcc1106b-ca31-4432-948b-f01f5f47c370-kube-api-access-vljnm\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc 
kubenswrapper[4760]: I1124 17:21:13.242698 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/dcc1106b-ca31-4432-948b-f01f5f47c370-openstack-config\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.242742 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcc1106b-ca31-4432-948b-f01f5f47c370-combined-ca-bundle\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.242785 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/dcc1106b-ca31-4432-948b-f01f5f47c370-openstack-config-secret\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.242819 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vljnm\" (UniqueName: \"kubernetes.io/projected/dcc1106b-ca31-4432-948b-f01f5f47c370-kube-api-access-vljnm\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.245256 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/dcc1106b-ca31-4432-948b-f01f5f47c370-openstack-config\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.250261 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcc1106b-ca31-4432-948b-f01f5f47c370-combined-ca-bundle\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.258567 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/dcc1106b-ca31-4432-948b-f01f5f47c370-openstack-config-secret\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.266934 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vljnm\" (UniqueName: \"kubernetes.io/projected/dcc1106b-ca31-4432-948b-f01f5f47c370-kube-api-access-vljnm\") pod \"openstackclient\" (UID: \"dcc1106b-ca31-4432-948b-f01f5f47c370\") " pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.338079 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.817553 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 24 17:21:13 crc kubenswrapper[4760]: W1124 17:21:13.825182 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddcc1106b_ca31_4432_948b_f01f5f47c370.slice/crio-a307f4b3c8b366829d7c467250e10168dc9026857dd2d86393e20be7d1f58bbd WatchSource:0}: Error finding container a307f4b3c8b366829d7c467250e10168dc9026857dd2d86393e20be7d1f58bbd: Status 404 returned error can't find the container with id a307f4b3c8b366829d7c467250e10168dc9026857dd2d86393e20be7d1f58bbd Nov 24 17:21:13 crc kubenswrapper[4760]: I1124 17:21:13.856303 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"dcc1106b-ca31-4432-948b-f01f5f47c370","Type":"ContainerStarted","Data":"a307f4b3c8b366829d7c467250e10168dc9026857dd2d86393e20be7d1f58bbd"} Nov 24 17:21:15 crc kubenswrapper[4760]: I1124 17:21:15.638388 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 24 17:21:16 crc kubenswrapper[4760]: I1124 17:21:16.956315 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-69f4488969-xwpx8"] Nov 24 17:21:16 crc kubenswrapper[4760]: I1124 17:21:16.958388 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:16 crc kubenswrapper[4760]: I1124 17:21:16.963306 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 24 17:21:16 crc kubenswrapper[4760]: I1124 17:21:16.963345 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 24 17:21:16 crc kubenswrapper[4760]: I1124 17:21:16.963345 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 24 17:21:16 crc kubenswrapper[4760]: I1124 17:21:16.972203 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-69f4488969-xwpx8"] Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.125988 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-public-tls-certs\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.126062 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37d3f873-9ed8-47d6-b62d-3b007dca3936-log-httpd\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.126092 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-config-data\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.126116 4760 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-combined-ca-bundle\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.126143 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-927gb\" (UniqueName: \"kubernetes.io/projected/37d3f873-9ed8-47d6-b62d-3b007dca3936-kube-api-access-927gb\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.126226 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-internal-tls-certs\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.126260 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37d3f873-9ed8-47d6-b62d-3b007dca3936-run-httpd\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.126304 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/37d3f873-9ed8-47d6-b62d-3b007dca3936-etc-swift\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.228303 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-public-tls-certs\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.228364 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37d3f873-9ed8-47d6-b62d-3b007dca3936-log-httpd\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.228391 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-config-data\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.228418 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-combined-ca-bundle\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 
17:21:17.228450 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-927gb\" (UniqueName: \"kubernetes.io/projected/37d3f873-9ed8-47d6-b62d-3b007dca3936-kube-api-access-927gb\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.228491 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-internal-tls-certs\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.228530 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37d3f873-9ed8-47d6-b62d-3b007dca3936-run-httpd\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.228553 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/37d3f873-9ed8-47d6-b62d-3b007dca3936-etc-swift\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.228999 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37d3f873-9ed8-47d6-b62d-3b007dca3936-run-httpd\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.229028 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37d3f873-9ed8-47d6-b62d-3b007dca3936-log-httpd\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.234252 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-internal-tls-certs\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.238424 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-config-data\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.242769 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/37d3f873-9ed8-47d6-b62d-3b007dca3936-etc-swift\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.243593 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-combined-ca-bundle\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.252409 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d3f873-9ed8-47d6-b62d-3b007dca3936-public-tls-certs\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.287589 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-927gb\" (UniqueName: \"kubernetes.io/projected/37d3f873-9ed8-47d6-b62d-3b007dca3936-kube-api-access-927gb\") pod \"swift-proxy-69f4488969-xwpx8\" (UID: \"37d3f873-9ed8-47d6-b62d-3b007dca3936\") " pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.295203 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:17 crc kubenswrapper[4760]: I1124 17:21:17.913238 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-69f4488969-xwpx8"] Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.636451 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.636922 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="ceilometer-central-agent" containerID="cri-o://15705ae3ab23e4dd7695478548e5160dad8e1d7e69944bddd9422aa1aa8053f3" gracePeriod=30 Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.637018 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="proxy-httpd" containerID="cri-o://e217bb50d8aad434daf01b74a3664b26ad12589dff342fe1122a9d8e73624523" gracePeriod=30 Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.637061 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="ceilometer-notification-agent" containerID="cri-o://953ea00a6e939e422fc60e0b7c483bcc6b7cef20418195ffc2e11d3424b3e78b" gracePeriod=30 Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.637050 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="sg-core" containerID="cri-o://eb0884a02c8d5ba8a75f6e4ace5a0761f8a08a00468ae0b7c52c6fc687f6845f" gracePeriod=30 Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.643106 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.938248 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-69f4488969-xwpx8" event={"ID":"37d3f873-9ed8-47d6-b62d-3b007dca3936","Type":"ContainerStarted","Data":"86f8611632ffdfa4ca3681c389d4dc407b09fdf72ca2f4f167370c219d129fe1"} Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.938300 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-69f4488969-xwpx8" 
event={"ID":"37d3f873-9ed8-47d6-b62d-3b007dca3936","Type":"ContainerStarted","Data":"52177c6bd3754c097b5cc80bbab0ebd305f196ca179914c7ceb4a57c02ae6682"} Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.938316 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-69f4488969-xwpx8" event={"ID":"37d3f873-9ed8-47d6-b62d-3b007dca3936","Type":"ContainerStarted","Data":"1dcc39e5a05503420aa4501865014df525c17bef364754fb7900ac3937b78649"} Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.938347 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.938368 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.940949 4760 generic.go:334] "Generic (PLEG): container finished" podID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerID="eb0884a02c8d5ba8a75f6e4ace5a0761f8a08a00468ae0b7c52c6fc687f6845f" exitCode=2 Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.940986 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3003350b-62f1-4eb7-b044-bc0e8b007ef5","Type":"ContainerDied","Data":"eb0884a02c8d5ba8a75f6e4ace5a0761f8a08a00468ae0b7c52c6fc687f6845f"} Nov 24 17:21:18 crc kubenswrapper[4760]: I1124 17:21:18.969679 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-69f4488969-xwpx8" podStartSLOduration=2.969660788 podStartE2EDuration="2.969660788s" podCreationTimestamp="2025-11-24 17:21:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:21:18.968035772 +0000 UTC m=+1074.290917332" watchObservedRunningTime="2025-11-24 17:21:18.969660788 +0000 UTC m=+1074.292542338" Nov 24 17:21:19 crc kubenswrapper[4760]: I1124 17:21:19.596763 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8565878c68-g58n7" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 24 17:21:19 crc kubenswrapper[4760]: I1124 17:21:19.953997 4760 generic.go:334] "Generic (PLEG): container finished" podID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerID="e217bb50d8aad434daf01b74a3664b26ad12589dff342fe1122a9d8e73624523" exitCode=0 Nov 24 17:21:19 crc kubenswrapper[4760]: I1124 17:21:19.954040 4760 generic.go:334] "Generic (PLEG): container finished" podID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerID="953ea00a6e939e422fc60e0b7c483bcc6b7cef20418195ffc2e11d3424b3e78b" exitCode=0 Nov 24 17:21:19 crc kubenswrapper[4760]: I1124 17:21:19.954047 4760 generic.go:334] "Generic (PLEG): container finished" podID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerID="15705ae3ab23e4dd7695478548e5160dad8e1d7e69944bddd9422aa1aa8053f3" exitCode=0 Nov 24 17:21:19 crc kubenswrapper[4760]: I1124 17:21:19.954136 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3003350b-62f1-4eb7-b044-bc0e8b007ef5","Type":"ContainerDied","Data":"e217bb50d8aad434daf01b74a3664b26ad12589dff342fe1122a9d8e73624523"} Nov 24 17:21:19 crc kubenswrapper[4760]: I1124 17:21:19.954183 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"3003350b-62f1-4eb7-b044-bc0e8b007ef5","Type":"ContainerDied","Data":"953ea00a6e939e422fc60e0b7c483bcc6b7cef20418195ffc2e11d3424b3e78b"} Nov 24 17:21:19 crc kubenswrapper[4760]: I1124 17:21:19.954202 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3003350b-62f1-4eb7-b044-bc0e8b007ef5","Type":"ContainerDied","Data":"15705ae3ab23e4dd7695478548e5160dad8e1d7e69944bddd9422aa1aa8053f3"} Nov 24 17:21:20 crc kubenswrapper[4760]: I1124 17:21:20.112458 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:21:20 crc kubenswrapper[4760]: I1124 17:21:20.112695 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bbc7d5c8-3179-415e-925d-b8cc60152042" containerName="glance-log" containerID="cri-o://7a8ef8a380b7e636ab00608764debb57fbc796e051d126785667d33c452e65f5" gracePeriod=30 Nov 24 17:21:20 crc kubenswrapper[4760]: I1124 17:21:20.112762 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bbc7d5c8-3179-415e-925d-b8cc60152042" containerName="glance-httpd" containerID="cri-o://b6378aeec00227a8350f88f40782b4a4c74ca74596cd4c08ab17a27f891a08e9" gracePeriod=30 Nov 24 17:21:20 crc kubenswrapper[4760]: I1124 17:21:20.968384 4760 generic.go:334] "Generic (PLEG): container finished" podID="bbc7d5c8-3179-415e-925d-b8cc60152042" containerID="7a8ef8a380b7e636ab00608764debb57fbc796e051d126785667d33c452e65f5" exitCode=143 Nov 24 17:21:20 crc kubenswrapper[4760]: I1124 17:21:20.968448 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bbc7d5c8-3179-415e-925d-b8cc60152042","Type":"ContainerDied","Data":"7a8ef8a380b7e636ab00608764debb57fbc796e051d126785667d33c452e65f5"} Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.068814 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.069072 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" containerName="glance-log" containerID="cri-o://c2a379271a702638a4dfbf98271ae09ff4f6b0f061b0d6f428825c9673c6a27a" gracePeriod=30 Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.069128 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" containerName="glance-httpd" containerID="cri-o://6bd0b71f5ee0bbaa89d9bcec094f602d762a66a876da1dabbfc75c0fe3c47675" gracePeriod=30 Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.111270 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-sz6nt"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.112331 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-sz6nt" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.119495 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-sz6nt"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.217878 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-sp75g"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.219119 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-sp75g" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.242339 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-sp75g"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.243647 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-operator-scripts\") pod \"nova-api-db-create-sz6nt\" (UID: \"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10\") " pod="openstack/nova-api-db-create-sz6nt" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.243763 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rm57v\" (UniqueName: \"kubernetes.io/projected/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-kube-api-access-rm57v\") pod \"nova-api-db-create-sz6nt\" (UID: \"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10\") " pod="openstack/nova-api-db-create-sz6nt" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.315893 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-c21f-account-create-s2ss6"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.317160 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c21f-account-create-s2ss6" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.325612 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.329955 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-457ln"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.331241 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-457ln" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.345144 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-operator-scripts\") pod \"nova-cell0-db-create-sp75g\" (UID: \"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2\") " pod="openstack/nova-cell0-db-create-sp75g" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.345384 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-operator-scripts\") pod \"nova-api-db-create-sz6nt\" (UID: \"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10\") " pod="openstack/nova-api-db-create-sz6nt" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.345478 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55h5z\" (UniqueName: \"kubernetes.io/projected/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-kube-api-access-55h5z\") pod \"nova-cell0-db-create-sp75g\" (UID: \"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2\") " pod="openstack/nova-cell0-db-create-sp75g" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.345709 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rm57v\" (UniqueName: \"kubernetes.io/projected/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-kube-api-access-rm57v\") pod \"nova-api-db-create-sz6nt\" (UID: \"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10\") " pod="openstack/nova-api-db-create-sz6nt" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.346772 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-operator-scripts\") pod \"nova-api-db-create-sz6nt\" (UID: \"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10\") " pod="openstack/nova-api-db-create-sz6nt" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.352860 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c21f-account-create-s2ss6"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.363070 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-457ln"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.368468 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rm57v\" (UniqueName: \"kubernetes.io/projected/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-kube-api-access-rm57v\") pod \"nova-api-db-create-sz6nt\" (UID: \"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10\") " pod="openstack/nova-api-db-create-sz6nt" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.447346 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-sz6nt" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.447760 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55h5z\" (UniqueName: \"kubernetes.io/projected/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-kube-api-access-55h5z\") pod \"nova-cell0-db-create-sp75g\" (UID: \"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2\") " pod="openstack/nova-cell0-db-create-sp75g" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.447864 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgpx7\" (UniqueName: \"kubernetes.io/projected/cf151a5f-3656-4c41-85ac-f1cdedf67f76-kube-api-access-tgpx7\") pod \"nova-cell1-db-create-457ln\" (UID: \"cf151a5f-3656-4c41-85ac-f1cdedf67f76\") " pod="openstack/nova-cell1-db-create-457ln" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.448036 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cltq\" (UniqueName: \"kubernetes.io/projected/cdab3c6d-f3c2-4eae-a180-5d7fea562148-kube-api-access-4cltq\") pod \"nova-api-c21f-account-create-s2ss6\" (UID: \"cdab3c6d-f3c2-4eae-a180-5d7fea562148\") " pod="openstack/nova-api-c21f-account-create-s2ss6" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.448234 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdab3c6d-f3c2-4eae-a180-5d7fea562148-operator-scripts\") pod \"nova-api-c21f-account-create-s2ss6\" (UID: \"cdab3c6d-f3c2-4eae-a180-5d7fea562148\") " pod="openstack/nova-api-c21f-account-create-s2ss6" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.448320 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-operator-scripts\") pod \"nova-cell0-db-create-sp75g\" (UID: \"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2\") " pod="openstack/nova-cell0-db-create-sp75g" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.448401 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf151a5f-3656-4c41-85ac-f1cdedf67f76-operator-scripts\") pod \"nova-cell1-db-create-457ln\" (UID: \"cf151a5f-3656-4c41-85ac-f1cdedf67f76\") " pod="openstack/nova-cell1-db-create-457ln" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.449475 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-operator-scripts\") pod \"nova-cell0-db-create-sp75g\" (UID: \"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2\") " pod="openstack/nova-cell0-db-create-sp75g" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.464680 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55h5z\" (UniqueName: \"kubernetes.io/projected/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-kube-api-access-55h5z\") pod \"nova-cell0-db-create-sp75g\" (UID: \"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2\") " pod="openstack/nova-cell0-db-create-sp75g" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.517862 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-2801-account-create-79wz9"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.519370 4760 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2801-account-create-79wz9" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.521586 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.529134 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-2801-account-create-79wz9"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.540974 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-sp75g" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.550420 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgpx7\" (UniqueName: \"kubernetes.io/projected/cf151a5f-3656-4c41-85ac-f1cdedf67f76-kube-api-access-tgpx7\") pod \"nova-cell1-db-create-457ln\" (UID: \"cf151a5f-3656-4c41-85ac-f1cdedf67f76\") " pod="openstack/nova-cell1-db-create-457ln" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.550517 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cltq\" (UniqueName: \"kubernetes.io/projected/cdab3c6d-f3c2-4eae-a180-5d7fea562148-kube-api-access-4cltq\") pod \"nova-api-c21f-account-create-s2ss6\" (UID: \"cdab3c6d-f3c2-4eae-a180-5d7fea562148\") " pod="openstack/nova-api-c21f-account-create-s2ss6" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.550565 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdab3c6d-f3c2-4eae-a180-5d7fea562148-operator-scripts\") pod \"nova-api-c21f-account-create-s2ss6\" (UID: \"cdab3c6d-f3c2-4eae-a180-5d7fea562148\") " pod="openstack/nova-api-c21f-account-create-s2ss6" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.550592 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf151a5f-3656-4c41-85ac-f1cdedf67f76-operator-scripts\") pod \"nova-cell1-db-create-457ln\" (UID: \"cf151a5f-3656-4c41-85ac-f1cdedf67f76\") " pod="openstack/nova-cell1-db-create-457ln" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.551232 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf151a5f-3656-4c41-85ac-f1cdedf67f76-operator-scripts\") pod \"nova-cell1-db-create-457ln\" (UID: \"cf151a5f-3656-4c41-85ac-f1cdedf67f76\") " pod="openstack/nova-cell1-db-create-457ln" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.552033 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdab3c6d-f3c2-4eae-a180-5d7fea562148-operator-scripts\") pod \"nova-api-c21f-account-create-s2ss6\" (UID: \"cdab3c6d-f3c2-4eae-a180-5d7fea562148\") " pod="openstack/nova-api-c21f-account-create-s2ss6" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.566856 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cltq\" (UniqueName: \"kubernetes.io/projected/cdab3c6d-f3c2-4eae-a180-5d7fea562148-kube-api-access-4cltq\") pod \"nova-api-c21f-account-create-s2ss6\" (UID: \"cdab3c6d-f3c2-4eae-a180-5d7fea562148\") " pod="openstack/nova-api-c21f-account-create-s2ss6" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.588935 4760 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-tgpx7\" (UniqueName: \"kubernetes.io/projected/cf151a5f-3656-4c41-85ac-f1cdedf67f76-kube-api-access-tgpx7\") pod \"nova-cell1-db-create-457ln\" (UID: \"cf151a5f-3656-4c41-85ac-f1cdedf67f76\") " pod="openstack/nova-cell1-db-create-457ln" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.639936 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c21f-account-create-s2ss6" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.652969 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrzbx\" (UniqueName: \"kubernetes.io/projected/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-kube-api-access-zrzbx\") pod \"nova-cell0-2801-account-create-79wz9\" (UID: \"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85\") " pod="openstack/nova-cell0-2801-account-create-79wz9" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.653451 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-operator-scripts\") pod \"nova-cell0-2801-account-create-79wz9\" (UID: \"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85\") " pod="openstack/nova-cell0-2801-account-create-79wz9" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.659597 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-457ln" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.722391 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-713e-account-create-7lqfc"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.723752 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-713e-account-create-7lqfc" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.725806 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.733427 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-713e-account-create-7lqfc"] Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.755280 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrzbx\" (UniqueName: \"kubernetes.io/projected/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-kube-api-access-zrzbx\") pod \"nova-cell0-2801-account-create-79wz9\" (UID: \"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85\") " pod="openstack/nova-cell0-2801-account-create-79wz9" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.755336 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-operator-scripts\") pod \"nova-cell0-2801-account-create-79wz9\" (UID: \"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85\") " pod="openstack/nova-cell0-2801-account-create-79wz9" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.756407 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-operator-scripts\") pod \"nova-cell0-2801-account-create-79wz9\" (UID: \"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85\") " pod="openstack/nova-cell0-2801-account-create-79wz9" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.772631 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrzbx\" (UniqueName: \"kubernetes.io/projected/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-kube-api-access-zrzbx\") pod \"nova-cell0-2801-account-create-79wz9\" (UID: \"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85\") " pod="openstack/nova-cell0-2801-account-create-79wz9" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.843904 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-2801-account-create-79wz9" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.856824 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4be89ae4-3cce-4eea-b760-0759df25aeaf-operator-scripts\") pod \"nova-cell1-713e-account-create-7lqfc\" (UID: \"4be89ae4-3cce-4eea-b760-0759df25aeaf\") " pod="openstack/nova-cell1-713e-account-create-7lqfc" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.856871 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxrq6\" (UniqueName: \"kubernetes.io/projected/4be89ae4-3cce-4eea-b760-0759df25aeaf-kube-api-access-pxrq6\") pod \"nova-cell1-713e-account-create-7lqfc\" (UID: \"4be89ae4-3cce-4eea-b760-0759df25aeaf\") " pod="openstack/nova-cell1-713e-account-create-7lqfc" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.958678 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4be89ae4-3cce-4eea-b760-0759df25aeaf-operator-scripts\") pod \"nova-cell1-713e-account-create-7lqfc\" (UID: \"4be89ae4-3cce-4eea-b760-0759df25aeaf\") " pod="openstack/nova-cell1-713e-account-create-7lqfc" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.958746 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxrq6\" (UniqueName: \"kubernetes.io/projected/4be89ae4-3cce-4eea-b760-0759df25aeaf-kube-api-access-pxrq6\") pod \"nova-cell1-713e-account-create-7lqfc\" (UID: \"4be89ae4-3cce-4eea-b760-0759df25aeaf\") " pod="openstack/nova-cell1-713e-account-create-7lqfc" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.959375 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4be89ae4-3cce-4eea-b760-0759df25aeaf-operator-scripts\") pod \"nova-cell1-713e-account-create-7lqfc\" (UID: \"4be89ae4-3cce-4eea-b760-0759df25aeaf\") " pod="openstack/nova-cell1-713e-account-create-7lqfc" Nov 24 17:21:21 crc kubenswrapper[4760]: I1124 17:21:21.978555 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxrq6\" (UniqueName: \"kubernetes.io/projected/4be89ae4-3cce-4eea-b760-0759df25aeaf-kube-api-access-pxrq6\") pod \"nova-cell1-713e-account-create-7lqfc\" (UID: \"4be89ae4-3cce-4eea-b760-0759df25aeaf\") " pod="openstack/nova-cell1-713e-account-create-7lqfc" Nov 24 17:21:22 crc kubenswrapper[4760]: I1124 17:21:22.004140 4760 generic.go:334] "Generic (PLEG): container finished" podID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" containerID="c2a379271a702638a4dfbf98271ae09ff4f6b0f061b0d6f428825c9673c6a27a" exitCode=143 Nov 24 17:21:22 crc kubenswrapper[4760]: I1124 17:21:22.004219 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e","Type":"ContainerDied","Data":"c2a379271a702638a4dfbf98271ae09ff4f6b0f061b0d6f428825c9673c6a27a"} Nov 24 17:21:22 crc kubenswrapper[4760]: I1124 17:21:22.045854 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-713e-account-create-7lqfc" Nov 24 17:21:24 crc kubenswrapper[4760]: I1124 17:21:24.026511 4760 generic.go:334] "Generic (PLEG): container finished" podID="bbc7d5c8-3179-415e-925d-b8cc60152042" containerID="b6378aeec00227a8350f88f40782b4a4c74ca74596cd4c08ab17a27f891a08e9" exitCode=0 Nov 24 17:21:24 crc kubenswrapper[4760]: I1124 17:21:24.026579 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bbc7d5c8-3179-415e-925d-b8cc60152042","Type":"ContainerDied","Data":"b6378aeec00227a8350f88f40782b4a4c74ca74596cd4c08ab17a27f891a08e9"} Nov 24 17:21:24 crc kubenswrapper[4760]: I1124 17:21:24.029281 4760 generic.go:334] "Generic (PLEG): container finished" podID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" containerID="53d5c0675b12f6e40e02567d1e8af2b2f59d1554751412369db3400cf4c0622b" exitCode=137 Nov 24 17:21:24 crc kubenswrapper[4760]: I1124 17:21:24.029325 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b140b79a-3f9b-4909-bf34-2be905ddf6b0","Type":"ContainerDied","Data":"53d5c0675b12f6e40e02567d1e8af2b2f59d1554751412369db3400cf4c0622b"} Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.040427 4760 generic.go:334] "Generic (PLEG): container finished" podID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" containerID="6bd0b71f5ee0bbaa89d9bcec094f602d762a66a876da1dabbfc75c0fe3c47675" exitCode=0 Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.040850 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e","Type":"ContainerDied","Data":"6bd0b71f5ee0bbaa89d9bcec094f602d762a66a876da1dabbfc75c0fe3c47675"} Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.042469 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"dcc1106b-ca31-4432-948b-f01f5f47c370","Type":"ContainerStarted","Data":"28f3e82be1dd9a4bb0b58ea820ebbc7daa2f1bd6fa0be915adb10d93890c4267"} Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.073209 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.073368 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.128267537 podStartE2EDuration="13.073352878s" podCreationTimestamp="2025-11-24 17:21:12 +0000 UTC" firstStartedPulling="2025-11-24 17:21:13.826913627 +0000 UTC m=+1069.149795187" lastFinishedPulling="2025-11-24 17:21:24.771998978 +0000 UTC m=+1080.094880528" observedRunningTime="2025-11-24 17:21:25.062683813 +0000 UTC m=+1080.385565353" watchObservedRunningTime="2025-11-24 17:21:25.073352878 +0000 UTC m=+1080.396234428" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.124860 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-combined-ca-bundle\") pod \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.124947 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b140b79a-3f9b-4909-bf34-2be905ddf6b0-logs\") pod \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.125025 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data-custom\") pod \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.125080 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-scripts\") pod \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.125113 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b140b79a-3f9b-4909-bf34-2be905ddf6b0-etc-machine-id\") pod \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.125217 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data\") pod \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.125276 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jm5r\" (UniqueName: \"kubernetes.io/projected/b140b79a-3f9b-4909-bf34-2be905ddf6b0-kube-api-access-5jm5r\") pod \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\" (UID: \"b140b79a-3f9b-4909-bf34-2be905ddf6b0\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.126521 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b140b79a-3f9b-4909-bf34-2be905ddf6b0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b140b79a-3f9b-4909-bf34-2be905ddf6b0" (UID: "b140b79a-3f9b-4909-bf34-2be905ddf6b0"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.128773 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b140b79a-3f9b-4909-bf34-2be905ddf6b0-logs" (OuterVolumeSpecName: "logs") pod "b140b79a-3f9b-4909-bf34-2be905ddf6b0" (UID: "b140b79a-3f9b-4909-bf34-2be905ddf6b0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.133213 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b140b79a-3f9b-4909-bf34-2be905ddf6b0" (UID: "b140b79a-3f9b-4909-bf34-2be905ddf6b0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.133243 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-scripts" (OuterVolumeSpecName: "scripts") pod "b140b79a-3f9b-4909-bf34-2be905ddf6b0" (UID: "b140b79a-3f9b-4909-bf34-2be905ddf6b0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.141436 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b140b79a-3f9b-4909-bf34-2be905ddf6b0-kube-api-access-5jm5r" (OuterVolumeSpecName: "kube-api-access-5jm5r") pod "b140b79a-3f9b-4909-bf34-2be905ddf6b0" (UID: "b140b79a-3f9b-4909-bf34-2be905ddf6b0"). InnerVolumeSpecName "kube-api-access-5jm5r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.154279 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b140b79a-3f9b-4909-bf34-2be905ddf6b0" (UID: "b140b79a-3f9b-4909-bf34-2be905ddf6b0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.211965 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.222082 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data" (OuterVolumeSpecName: "config-data") pod "b140b79a-3f9b-4909-bf34-2be905ddf6b0" (UID: "b140b79a-3f9b-4909-bf34-2be905ddf6b0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.223952 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.227808 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c8hv\" (UniqueName: \"kubernetes.io/projected/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-kube-api-access-9c8hv\") pod \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.227847 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvr5c\" (UniqueName: \"kubernetes.io/projected/3003350b-62f1-4eb7-b044-bc0e8b007ef5-kube-api-access-bvr5c\") pod \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.227927 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-combined-ca-bundle\") pod \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.227960 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-config-data\") pod \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.227989 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-httpd-run\") pod \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.228034 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-logs\") pod \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.228063 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-combined-ca-bundle\") pod \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.228918 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" (UID: "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.229296 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-logs" (OuterVolumeSpecName: "logs") pod "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" (UID: "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.236989 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" (UID: "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.228127 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.237758 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-internal-tls-certs\") pod \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.237806 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-scripts\") pod \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.237836 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-config-data\") pod \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.237863 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-run-httpd\") pod \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.237929 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-scripts\") pod \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\" (UID: \"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.237972 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-sg-core-conf-yaml\") pod \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.238071 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-log-httpd\") pod \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\" (UID: \"3003350b-62f1-4eb7-b044-bc0e8b007ef5\") " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.239565 4760 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.239603 4760 
reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.239613 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.239625 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jm5r\" (UniqueName: \"kubernetes.io/projected/b140b79a-3f9b-4909-bf34-2be905ddf6b0-kube-api-access-5jm5r\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.239656 4760 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.239666 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.239677 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b140b79a-3f9b-4909-bf34-2be905ddf6b0-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.239692 4760 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.239700 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b140b79a-3f9b-4909-bf34-2be905ddf6b0-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.239709 4760 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b140b79a-3f9b-4909-bf34-2be905ddf6b0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.242519 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-kube-api-access-9c8hv" (OuterVolumeSpecName: "kube-api-access-9c8hv") pod "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" (UID: "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e"). InnerVolumeSpecName "kube-api-access-9c8hv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.245413 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3003350b-62f1-4eb7-b044-bc0e8b007ef5" (UID: "3003350b-62f1-4eb7-b044-bc0e8b007ef5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.250032 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3003350b-62f1-4eb7-b044-bc0e8b007ef5-kube-api-access-bvr5c" (OuterVolumeSpecName: "kube-api-access-bvr5c") pod "3003350b-62f1-4eb7-b044-bc0e8b007ef5" (UID: "3003350b-62f1-4eb7-b044-bc0e8b007ef5"). 
InnerVolumeSpecName "kube-api-access-bvr5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.252804 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3003350b-62f1-4eb7-b044-bc0e8b007ef5" (UID: "3003350b-62f1-4eb7-b044-bc0e8b007ef5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.253201 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-scripts" (OuterVolumeSpecName: "scripts") pod "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" (UID: "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.255728 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-scripts" (OuterVolumeSpecName: "scripts") pod "3003350b-62f1-4eb7-b044-bc0e8b007ef5" (UID: "3003350b-62f1-4eb7-b044-bc0e8b007ef5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.274673 4760 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.285849 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" (UID: "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.334958 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" (UID: "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.337682 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3003350b-62f1-4eb7-b044-bc0e8b007ef5" (UID: "3003350b-62f1-4eb7-b044-bc0e8b007ef5"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.341700 4760 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.341726 4760 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.341736 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.341745 4760 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.341753 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.341762 4760 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.341770 4760 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3003350b-62f1-4eb7-b044-bc0e8b007ef5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.341777 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c8hv\" (UniqueName: \"kubernetes.io/projected/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-kube-api-access-9c8hv\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.341786 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvr5c\" (UniqueName: \"kubernetes.io/projected/3003350b-62f1-4eb7-b044-bc0e8b007ef5-kube-api-access-bvr5c\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.341794 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.349264 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-config-data" (OuterVolumeSpecName: "config-data") pod "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" (UID: "1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.357733 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3003350b-62f1-4eb7-b044-bc0e8b007ef5" (UID: "3003350b-62f1-4eb7-b044-bc0e8b007ef5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.373288 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-config-data" (OuterVolumeSpecName: "config-data") pod "3003350b-62f1-4eb7-b044-bc0e8b007ef5" (UID: "3003350b-62f1-4eb7-b044-bc0e8b007ef5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.376964 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-2801-account-create-79wz9"] Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.392738 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.443751 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.443787 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3003350b-62f1-4eb7-b044-bc0e8b007ef5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.443800 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.496989 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-sz6nt"] Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.497278 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-713e-account-create-7lqfc"] Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.504063 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-457ln"] Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.533540 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c21f-account-create-s2ss6"] Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.539238 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-sp75g"] Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.594422 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 24 17:21:25 crc kubenswrapper[4760]: I1124 17:21:25.594686 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 24 17:21:25 crc kubenswrapper[4760]: W1124 17:21:25.598290 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3b3763f_9cbc_47ec_b70c_cc0fc3f289b2.slice/crio-279c6d6d8043e78d1cc2a37bce7d36fb37eff9c2e6ed0714373d15e37d031a1c WatchSource:0}: Error finding container 279c6d6d8043e78d1cc2a37bce7d36fb37eff9c2e6ed0714373d15e37d031a1c: Status 404 returned error can't find the container with id 279c6d6d8043e78d1cc2a37bce7d36fb37eff9c2e6ed0714373d15e37d031a1c Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.053681 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c21f-account-create-s2ss6" 
event={"ID":"cdab3c6d-f3c2-4eae-a180-5d7fea562148","Type":"ContainerStarted","Data":"dcdc777ad4d86949f78e793caff4a7f771343e04bc89cebdbf20be1c2976d072"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.057577 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bbc7d5c8-3179-415e-925d-b8cc60152042","Type":"ContainerDied","Data":"300fb358ba0c6be43300898606c8b8777b6d10c419fd1913a917496607aeec66"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.057615 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="300fb358ba0c6be43300898606c8b8777b6d10c419fd1913a917496607aeec66" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.060348 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-457ln" event={"ID":"cf151a5f-3656-4c41-85ac-f1cdedf67f76","Type":"ContainerStarted","Data":"c15cc900b598d000708b09c6801a8ad23ccebeeafd814a93c570aa82dd11261c"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.061873 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-sp75g" event={"ID":"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2","Type":"ContainerStarted","Data":"279c6d6d8043e78d1cc2a37bce7d36fb37eff9c2e6ed0714373d15e37d031a1c"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.063558 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2801-account-create-79wz9" event={"ID":"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85","Type":"ContainerStarted","Data":"7b88520b6d60ec40bd97a50308d4b2e36b5cfc4b27a80677b3faf35dd7c1a1c6"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.063595 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2801-account-create-79wz9" event={"ID":"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85","Type":"ContainerStarted","Data":"457f4840d96bac8056f775e8e5864faadb558db7d1ddc72732ca43bd79716580"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.069118 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e","Type":"ContainerDied","Data":"98f678075c2c88ba9f7423add0dc97615ab8f378b739aa511825751c39ae0326"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.069166 4760 scope.go:117] "RemoveContainer" containerID="6bd0b71f5ee0bbaa89d9bcec094f602d762a66a876da1dabbfc75c0fe3c47675" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.069186 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.071217 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-sz6nt" event={"ID":"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10","Type":"ContainerStarted","Data":"f614e485bad4affe2b6eff8a05c15f9776104aa4132f72bbb7edd59f1c6a70f8"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.075474 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3003350b-62f1-4eb7-b044-bc0e8b007ef5","Type":"ContainerDied","Data":"666dc2e220ec0a61b83a06147caa4a2740f97d525ea83900c67e50e92ab203e0"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.075534 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.079075 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-713e-account-create-7lqfc" event={"ID":"4be89ae4-3cce-4eea-b760-0759df25aeaf","Type":"ContainerStarted","Data":"41a4ebcfdb522748ccf6bf676fbbeb111b86dacc4fb9faf2bd33601cb32c9a30"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.083791 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-2801-account-create-79wz9" podStartSLOduration=5.083773555 podStartE2EDuration="5.083773555s" podCreationTimestamp="2025-11-24 17:21:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:21:26.079272006 +0000 UTC m=+1081.402153576" watchObservedRunningTime="2025-11-24 17:21:26.083773555 +0000 UTC m=+1081.406655105" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.088135 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.088760 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b140b79a-3f9b-4909-bf34-2be905ddf6b0","Type":"ContainerDied","Data":"b343b99fdaf802ab21170b5252529424e4a2fdcd3307822c3673f571bd51c635"} Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.266217 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.274865 4760 scope.go:117] "RemoveContainer" containerID="c2a379271a702638a4dfbf98271ae09ff4f6b0f061b0d6f428825c9673c6a27a" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.290276 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.299810 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.320523 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.332077 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360132 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 24 17:21:26 crc kubenswrapper[4760]: E1124 17:21:26.360471 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" containerName="glance-log" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360482 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" containerName="glance-log" Nov 24 17:21:26 crc kubenswrapper[4760]: E1124 17:21:26.360495 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" containerName="cinder-api" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360500 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" containerName="cinder-api" Nov 24 17:21:26 crc kubenswrapper[4760]: E1124 17:21:26.360509 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" containerName="cinder-api-log" Nov 24 
17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360515 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" containerName="cinder-api-log" Nov 24 17:21:26 crc kubenswrapper[4760]: E1124 17:21:26.360527 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="ceilometer-central-agent" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360532 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="ceilometer-central-agent" Nov 24 17:21:26 crc kubenswrapper[4760]: E1124 17:21:26.360547 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="sg-core" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360552 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="sg-core" Nov 24 17:21:26 crc kubenswrapper[4760]: E1124 17:21:26.360564 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbc7d5c8-3179-415e-925d-b8cc60152042" containerName="glance-log" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360569 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbc7d5c8-3179-415e-925d-b8cc60152042" containerName="glance-log" Nov 24 17:21:26 crc kubenswrapper[4760]: E1124 17:21:26.360577 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="proxy-httpd" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360582 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="proxy-httpd" Nov 24 17:21:26 crc kubenswrapper[4760]: E1124 17:21:26.360593 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" containerName="glance-httpd" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360599 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" containerName="glance-httpd" Nov 24 17:21:26 crc kubenswrapper[4760]: E1124 17:21:26.360608 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbc7d5c8-3179-415e-925d-b8cc60152042" containerName="glance-httpd" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360614 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbc7d5c8-3179-415e-925d-b8cc60152042" containerName="glance-httpd" Nov 24 17:21:26 crc kubenswrapper[4760]: E1124 17:21:26.360625 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="ceilometer-notification-agent" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360631 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="ceilometer-notification-agent" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360783 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" containerName="glance-httpd" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360796 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="ceilometer-central-agent" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360806 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" 
containerName="cinder-api" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360825 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="ceilometer-notification-agent" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360844 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" containerName="cinder-api-log" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360852 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="proxy-httpd" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360870 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbc7d5c8-3179-415e-925d-b8cc60152042" containerName="glance-log" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360878 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" containerName="glance-log" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360885 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" containerName="sg-core" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.360892 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbc7d5c8-3179-415e-925d-b8cc60152042" containerName="glance-httpd" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.361757 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.364308 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.372137 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.373523 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.373725 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.391309 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.396938 4760 scope.go:117] "RemoveContainer" containerID="e217bb50d8aad434daf01b74a3664b26ad12589dff342fe1122a9d8e73624523" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.461300 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.463132 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.472625 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.472740 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-scripts\") pod \"bbc7d5c8-3179-415e-925d-b8cc60152042\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.472785 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-public-tls-certs\") pod \"bbc7d5c8-3179-415e-925d-b8cc60152042\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.472840 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.472883 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-combined-ca-bundle\") pod \"bbc7d5c8-3179-415e-925d-b8cc60152042\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.472940 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-httpd-run\") pod \"bbc7d5c8-3179-415e-925d-b8cc60152042\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.472990 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-config-data\") pod \"bbc7d5c8-3179-415e-925d-b8cc60152042\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473058 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p42kz\" (UniqueName: \"kubernetes.io/projected/bbc7d5c8-3179-415e-925d-b8cc60152042-kube-api-access-p42kz\") pod \"bbc7d5c8-3179-415e-925d-b8cc60152042\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473075 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-logs\") pod \"bbc7d5c8-3179-415e-925d-b8cc60152042\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473120 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"bbc7d5c8-3179-415e-925d-b8cc60152042\" (UID: \"bbc7d5c8-3179-415e-925d-b8cc60152042\") " Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473309 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-config-data\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0" 
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473367 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-scripts\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473391 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-logs\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473407 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bf8w\" (UniqueName: \"kubernetes.io/projected/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-kube-api-access-7bf8w\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473429 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473461 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-config-data-custom\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473511 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473539 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.473575 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.476903 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "bbc7d5c8-3179-415e-925d-b8cc60152042" (UID: "bbc7d5c8-3179-415e-925d-b8cc60152042"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.484902 4760 scope.go:117] "RemoveContainer" containerID="eb0884a02c8d5ba8a75f6e4ace5a0761f8a08a00468ae0b7c52c6fc687f6845f"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.485471 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-logs" (OuterVolumeSpecName: "logs") pod "bbc7d5c8-3179-415e-925d-b8cc60152042" (UID: "bbc7d5c8-3179-415e-925d-b8cc60152042"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.498094 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbc7d5c8-3179-415e-925d-b8cc60152042-kube-api-access-p42kz" (OuterVolumeSpecName: "kube-api-access-p42kz") pod "bbc7d5c8-3179-415e-925d-b8cc60152042" (UID: "bbc7d5c8-3179-415e-925d-b8cc60152042"). InnerVolumeSpecName "kube-api-access-p42kz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.502447 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.508555 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-scripts" (OuterVolumeSpecName: "scripts") pod "bbc7d5c8-3179-415e-925d-b8cc60152042" (UID: "bbc7d5c8-3179-415e-925d-b8cc60152042"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.515405 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "bbc7d5c8-3179-415e-925d-b8cc60152042" (UID: "bbc7d5c8-3179-415e-925d-b8cc60152042"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.528391 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.530511 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.532496 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.532683 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.543367 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.574968 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575047 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575084 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575124 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/74e069c9-8459-4455-b520-fa8ba79bb677-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575156 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575182 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-config-data\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575208 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-config-data\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575232 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575263 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-scripts\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575284 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-logs\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575305 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bf8w\" (UniqueName: \"kubernetes.io/projected/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-kube-api-access-7bf8w\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.575328 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.581079 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-scripts\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.583402 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-logs\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.597271 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.598945 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-config-data-custom\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.598976 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74e069c9-8459-4455-b520-fa8ba79bb677-logs\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.599060 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.599085 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phk9s\" (UniqueName: \"kubernetes.io/projected/74e069c9-8459-4455-b520-fa8ba79bb677-kube-api-access-phk9s\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.599155 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.599165 4760 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.599175 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p42kz\" (UniqueName: \"kubernetes.io/projected/bbc7d5c8-3179-415e-925d-b8cc60152042-kube-api-access-p42kz\") on node \"crc\" DevicePath \"\""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.599183 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bbc7d5c8-3179-415e-925d-b8cc60152042-logs\") on node \"crc\" DevicePath \"\""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.599200 4760 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" "
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.607961 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-config-data\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.610294 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.611041 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.613310 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-scripts\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.613499 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bf8w\" (UniqueName: \"kubernetes.io/projected/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-kube-api-access-7bf8w\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.613684 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.629783 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.631478 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd87b6ec-9f3d-41ae-9647-6410620a1f4a-config-data-custom\") pod \"cinder-api-0\" (UID: \"fd87b6ec-9f3d-41ae-9647-6410620a1f4a\") " pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.653424 4760 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.656185 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bbc7d5c8-3179-415e-925d-b8cc60152042" (UID: "bbc7d5c8-3179-415e-925d-b8cc60152042"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.659632 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-config-data" (OuterVolumeSpecName: "config-data") pod "bbc7d5c8-3179-415e-925d-b8cc60152042" (UID: "bbc7d5c8-3179-415e-925d-b8cc60152042"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.666864 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "bbc7d5c8-3179-415e-925d-b8cc60152042" (UID: "bbc7d5c8-3179-415e-925d-b8cc60152042"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.700882 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74e069c9-8459-4455-b520-fa8ba79bb677-logs\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.700945 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.700982 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701020 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701038 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnckx\" (UniqueName: \"kubernetes.io/projected/1c8fb97f-21ba-4875-8f92-90eb2639d942-kube-api-access-dnckx\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701055 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phk9s\" (UniqueName: \"kubernetes.io/projected/74e069c9-8459-4455-b520-fa8ba79bb677-kube-api-access-phk9s\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701085 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701125 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-log-httpd\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701161 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/74e069c9-8459-4455-b520-fa8ba79bb677-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701206 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-config-data\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701234 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701272 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-scripts\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701332 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-config-data\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701349 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-scripts\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701366 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-run-httpd\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701856 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.701936 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.702558 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74e069c9-8459-4455-b520-fa8ba79bb677-logs\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.703205 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/74e069c9-8459-4455-b520-fa8ba79bb677-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.703614 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.703748 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.704149 4760 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.704188 4760 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbc7d5c8-3179-415e-925d-b8cc60152042-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.705610 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-scripts\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.707766 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.711146 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-config-data\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.712143 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74e069c9-8459-4455-b520-fa8ba79bb677-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.734123 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phk9s\" (UniqueName: \"kubernetes.io/projected/74e069c9-8459-4455-b520-fa8ba79bb677-kube-api-access-phk9s\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.767847 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"74e069c9-8459-4455-b520-fa8ba79bb677\") " pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.805760 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-scripts\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.805808 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-config-data\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.805829 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-run-httpd\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.805868 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.805883 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.805903 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnckx\" (UniqueName: \"kubernetes.io/projected/1c8fb97f-21ba-4875-8f92-90eb2639d942-kube-api-access-dnckx\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.805943 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-log-httpd\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.806442 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-log-httpd\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.808340 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.809936 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-run-httpd\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.814466 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-scripts\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.814686 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-config-data\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.818172 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.818323 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.832303 4760 scope.go:117] "RemoveContainer" containerID="953ea00a6e939e422fc60e0b7c483bcc6b7cef20418195ffc2e11d3424b3e78b"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.838897 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnckx\" (UniqueName: \"kubernetes.io/projected/1c8fb97f-21ba-4875-8f92-90eb2639d942-kube-api-access-dnckx\") pod \"ceilometer-0\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.874973 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 17:21:26 crc kubenswrapper[4760]: I1124 17:21:26.895497 4760 scope.go:117] "RemoveContainer" containerID="15705ae3ab23e4dd7695478548e5160dad8e1d7e69944bddd9422aa1aa8053f3"
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.082427 4760 scope.go:117] "RemoveContainer" containerID="53d5c0675b12f6e40e02567d1e8af2b2f59d1554751412369db3400cf4c0622b"
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.140051 4760 scope.go:117] "RemoveContainer" containerID="cbcaf786ec5a92cec8987c134fcfbb4b4fbab61ed70cd8814f73cbdcad18af07"
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.142550 4760 generic.go:334] "Generic (PLEG): container finished" podID="cdab3c6d-f3c2-4eae-a180-5d7fea562148" containerID="89214f167dc8a23470347ceb4f6e72f95b0a5e8203cb957182b357b505365a7b" exitCode=0
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.142622 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c21f-account-create-s2ss6" event={"ID":"cdab3c6d-f3c2-4eae-a180-5d7fea562148","Type":"ContainerDied","Data":"89214f167dc8a23470347ceb4f6e72f95b0a5e8203cb957182b357b505365a7b"}
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.145111 4760 generic.go:334] "Generic (PLEG): container finished" podID="66dd0d71-2f7f-485a-9cd7-7b2e84a49a85" containerID="7b88520b6d60ec40bd97a50308d4b2e36b5cfc4b27a80677b3faf35dd7c1a1c6" exitCode=0
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.145159 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2801-account-create-79wz9" event={"ID":"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85","Type":"ContainerDied","Data":"7b88520b6d60ec40bd97a50308d4b2e36b5cfc4b27a80677b3faf35dd7c1a1c6"}
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.154298 4760 generic.go:334] "Generic (PLEG): container finished" podID="7e2f485a-8ed4-4ba6-a35b-9e5788d46e10" containerID="e1ca4488952d3e9c07a18aafc349ec3e3459f1ecde5d71083e26821008e6d8b3" exitCode=0
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.154441 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-sz6nt" event={"ID":"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10","Type":"ContainerDied","Data":"e1ca4488952d3e9c07a18aafc349ec3e3459f1ecde5d71083e26821008e6d8b3"}
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.164338 4760 generic.go:334] "Generic (PLEG): container finished" podID="4be89ae4-3cce-4eea-b760-0759df25aeaf" containerID="f8ca0b9ea6e99f0a254332e211aa436f353811e80b73a09287daaae7f7db7c64" exitCode=0
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.164399 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-713e-account-create-7lqfc" event={"ID":"4be89ae4-3cce-4eea-b760-0759df25aeaf","Type":"ContainerDied","Data":"f8ca0b9ea6e99f0a254332e211aa436f353811e80b73a09287daaae7f7db7c64"}
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.165773 4760 generic.go:334] "Generic (PLEG): container finished" podID="cf151a5f-3656-4c41-85ac-f1cdedf67f76" containerID="9b91d2c71656b73469b6636aa25fcb341195df85c3a801b171d580d3a9b28675" exitCode=0
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.165818 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-457ln" event={"ID":"cf151a5f-3656-4c41-85ac-f1cdedf67f76","Type":"ContainerDied","Data":"9b91d2c71656b73469b6636aa25fcb341195df85c3a801b171d580d3a9b28675"}
Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.170263 4760 generic.go:334] "Generic (PLEG):
container finished" podID="f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2" containerID="a7ee9b9f1e8fc50b7296aa9aaf461510a9b7493d7d00759dcd0df1dc177e1866" exitCode=0 Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.170322 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-sp75g" event={"ID":"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2","Type":"ContainerDied","Data":"a7ee9b9f1e8fc50b7296aa9aaf461510a9b7493d7d00759dcd0df1dc177e1866"} Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.170404 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.221062 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.261115 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.270531 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.307386 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.309272 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.313152 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.313811 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.316508 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.319295 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-69f4488969-xwpx8" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.320774 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.429740 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.429789 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlnw9\" (UniqueName: \"kubernetes.io/projected/8531a189-02f3-4e03-8fca-ff113990ee3e-kube-api-access-qlnw9\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.429840 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " 
pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.429874 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-config-data\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.429899 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8531a189-02f3-4e03-8fca-ff113990ee3e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.429949 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-scripts\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.429968 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.429997 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8531a189-02f3-4e03-8fca-ff113990ee3e-logs\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.459150 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 17:21:27 crc kubenswrapper[4760]: W1124 17:21:27.463124 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74e069c9_8459_4455_b520_fa8ba79bb677.slice/crio-4326036fbaeaed03b6963be4f926648ee86c744953a94cb71121a24903f19aa4 WatchSource:0}: Error finding container 4326036fbaeaed03b6963be4f926648ee86c744953a94cb71121a24903f19aa4: Status 404 returned error can't find the container with id 4326036fbaeaed03b6963be4f926648ee86c744953a94cb71121a24903f19aa4 Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.481506 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e" path="/var/lib/kubelet/pods/1c8ce1a7-102a-4f2c-b3ce-c8bfcd35006e/volumes" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.482909 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3003350b-62f1-4eb7-b044-bc0e8b007ef5" path="/var/lib/kubelet/pods/3003350b-62f1-4eb7-b044-bc0e8b007ef5/volumes" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.485100 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b140b79a-3f9b-4909-bf34-2be905ddf6b0" path="/var/lib/kubelet/pods/b140b79a-3f9b-4909-bf34-2be905ddf6b0/volumes" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 
17:21:27.486160 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbc7d5c8-3179-415e-925d-b8cc60152042" path="/var/lib/kubelet/pods/bbc7d5c8-3179-415e-925d-b8cc60152042/volumes" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.513507 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.531044 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.531097 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-config-data\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.531117 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8531a189-02f3-4e03-8fca-ff113990ee3e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.531170 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-scripts\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.531188 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.531217 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8531a189-02f3-4e03-8fca-ff113990ee3e-logs\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.531258 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.531279 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlnw9\" (UniqueName: \"kubernetes.io/projected/8531a189-02f3-4e03-8fca-ff113990ee3e-kube-api-access-qlnw9\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.531641 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for 
volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.538396 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8531a189-02f3-4e03-8fca-ff113990ee3e-logs\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.539594 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8531a189-02f3-4e03-8fca-ff113990ee3e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.540110 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-config-data\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.557975 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.559446 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.560060 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8531a189-02f3-4e03-8fca-ff113990ee3e-scripts\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.563347 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlnw9\" (UniqueName: \"kubernetes.io/projected/8531a189-02f3-4e03-8fca-ff113990ee3e-kube-api-access-qlnw9\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.582543 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"8531a189-02f3-4e03-8fca-ff113990ee3e\") " pod="openstack/glance-default-external-api-0" Nov 24 17:21:27 crc kubenswrapper[4760]: I1124 17:21:27.631345 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.148303 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.180285 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"fd87b6ec-9f3d-41ae-9647-6410620a1f4a","Type":"ContainerStarted","Data":"031b3b574dbfa1876dd304085e5c99e4fbc9c0d27b7c85007e3a49bbb7fd7b1e"} Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.180328 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"fd87b6ec-9f3d-41ae-9647-6410620a1f4a","Type":"ContainerStarted","Data":"d46278568ec310b8e738d1ba9f8cf63d066499be3f4578a48949e2000b18decd"} Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.181355 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8531a189-02f3-4e03-8fca-ff113990ee3e","Type":"ContainerStarted","Data":"5c4252e93f4f4b81477ff287af603e91e7044b4f6ac6c8741fc25d8326d22cc8"} Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.184492 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"74e069c9-8459-4455-b520-fa8ba79bb677","Type":"ContainerStarted","Data":"4326036fbaeaed03b6963be4f926648ee86c744953a94cb71121a24903f19aa4"} Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.187657 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c8fb97f-21ba-4875-8f92-90eb2639d942","Type":"ContainerStarted","Data":"bbcc8356b44f98c3f148bec5a43ae5bcafa6245f0ee851cc3b3cfc5eaf991a6d"} Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.658256 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-713e-account-create-7lqfc" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.770956 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxrq6\" (UniqueName: \"kubernetes.io/projected/4be89ae4-3cce-4eea-b760-0759df25aeaf-kube-api-access-pxrq6\") pod \"4be89ae4-3cce-4eea-b760-0759df25aeaf\" (UID: \"4be89ae4-3cce-4eea-b760-0759df25aeaf\") " Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.771118 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4be89ae4-3cce-4eea-b760-0759df25aeaf-operator-scripts\") pod \"4be89ae4-3cce-4eea-b760-0759df25aeaf\" (UID: \"4be89ae4-3cce-4eea-b760-0759df25aeaf\") " Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.771942 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4be89ae4-3cce-4eea-b760-0759df25aeaf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4be89ae4-3cce-4eea-b760-0759df25aeaf" (UID: "4be89ae4-3cce-4eea-b760-0759df25aeaf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.775670 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4be89ae4-3cce-4eea-b760-0759df25aeaf-kube-api-access-pxrq6" (OuterVolumeSpecName: "kube-api-access-pxrq6") pod "4be89ae4-3cce-4eea-b760-0759df25aeaf" (UID: "4be89ae4-3cce-4eea-b760-0759df25aeaf"). InnerVolumeSpecName "kube-api-access-pxrq6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.799587 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-sp75g" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.875591 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4be89ae4-3cce-4eea-b760-0759df25aeaf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.875626 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxrq6\" (UniqueName: \"kubernetes.io/projected/4be89ae4-3cce-4eea-b760-0759df25aeaf-kube-api-access-pxrq6\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.951413 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-sz6nt" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.971421 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2801-account-create-79wz9" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.977701 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-operator-scripts\") pod \"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2\" (UID: \"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2\") " Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.977793 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55h5z\" (UniqueName: \"kubernetes.io/projected/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-kube-api-access-55h5z\") pod \"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2\" (UID: \"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2\") " Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.980726 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2" (UID: "f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.981515 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:28 crc kubenswrapper[4760]: I1124 17:21:28.986431 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-kube-api-access-55h5z" (OuterVolumeSpecName: "kube-api-access-55h5z") pod "f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2" (UID: "f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2"). InnerVolumeSpecName "kube-api-access-55h5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.034493 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-c21f-account-create-s2ss6" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.082800 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cltq\" (UniqueName: \"kubernetes.io/projected/cdab3c6d-f3c2-4eae-a180-5d7fea562148-kube-api-access-4cltq\") pod \"cdab3c6d-f3c2-4eae-a180-5d7fea562148\" (UID: \"cdab3c6d-f3c2-4eae-a180-5d7fea562148\") " Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.082973 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-operator-scripts\") pod \"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10\" (UID: \"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10\") " Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.083486 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7e2f485a-8ed4-4ba6-a35b-9e5788d46e10" (UID: "7e2f485a-8ed4-4ba6-a35b-9e5788d46e10"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.083557 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdab3c6d-f3c2-4eae-a180-5d7fea562148-operator-scripts\") pod \"cdab3c6d-f3c2-4eae-a180-5d7fea562148\" (UID: \"cdab3c6d-f3c2-4eae-a180-5d7fea562148\") " Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.083617 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rm57v\" (UniqueName: \"kubernetes.io/projected/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-kube-api-access-rm57v\") pod \"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10\" (UID: \"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10\") " Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.083945 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdab3c6d-f3c2-4eae-a180-5d7fea562148-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cdab3c6d-f3c2-4eae-a180-5d7fea562148" (UID: "cdab3c6d-f3c2-4eae-a180-5d7fea562148"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.083979 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-operator-scripts\") pod \"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85\" (UID: \"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85\") " Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.084046 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrzbx\" (UniqueName: \"kubernetes.io/projected/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-kube-api-access-zrzbx\") pod \"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85\" (UID: \"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85\") " Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.084281 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "66dd0d71-2f7f-485a-9cd7-7b2e84a49a85" (UID: "66dd0d71-2f7f-485a-9cd7-7b2e84a49a85"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.084638 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.084666 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55h5z\" (UniqueName: \"kubernetes.io/projected/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2-kube-api-access-55h5z\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.084680 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdab3c6d-f3c2-4eae-a180-5d7fea562148-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.084691 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.087567 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-kube-api-access-zrzbx" (OuterVolumeSpecName: "kube-api-access-zrzbx") pod "66dd0d71-2f7f-485a-9cd7-7b2e84a49a85" (UID: "66dd0d71-2f7f-485a-9cd7-7b2e84a49a85"). InnerVolumeSpecName "kube-api-access-zrzbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.089500 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-kube-api-access-rm57v" (OuterVolumeSpecName: "kube-api-access-rm57v") pod "7e2f485a-8ed4-4ba6-a35b-9e5788d46e10" (UID: "7e2f485a-8ed4-4ba6-a35b-9e5788d46e10"). InnerVolumeSpecName "kube-api-access-rm57v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.090434 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdab3c6d-f3c2-4eae-a180-5d7fea562148-kube-api-access-4cltq" (OuterVolumeSpecName: "kube-api-access-4cltq") pod "cdab3c6d-f3c2-4eae-a180-5d7fea562148" (UID: "cdab3c6d-f3c2-4eae-a180-5d7fea562148"). InnerVolumeSpecName "kube-api-access-4cltq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.120952 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-457ln" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.185407 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgpx7\" (UniqueName: \"kubernetes.io/projected/cf151a5f-3656-4c41-85ac-f1cdedf67f76-kube-api-access-tgpx7\") pod \"cf151a5f-3656-4c41-85ac-f1cdedf67f76\" (UID: \"cf151a5f-3656-4c41-85ac-f1cdedf67f76\") " Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.185504 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf151a5f-3656-4c41-85ac-f1cdedf67f76-operator-scripts\") pod \"cf151a5f-3656-4c41-85ac-f1cdedf67f76\" (UID: \"cf151a5f-3656-4c41-85ac-f1cdedf67f76\") " Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.186160 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrzbx\" (UniqueName: \"kubernetes.io/projected/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85-kube-api-access-zrzbx\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.186173 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cltq\" (UniqueName: \"kubernetes.io/projected/cdab3c6d-f3c2-4eae-a180-5d7fea562148-kube-api-access-4cltq\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.186182 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rm57v\" (UniqueName: \"kubernetes.io/projected/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10-kube-api-access-rm57v\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.186478 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf151a5f-3656-4c41-85ac-f1cdedf67f76-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cf151a5f-3656-4c41-85ac-f1cdedf67f76" (UID: "cf151a5f-3656-4c41-85ac-f1cdedf67f76"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.191378 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf151a5f-3656-4c41-85ac-f1cdedf67f76-kube-api-access-tgpx7" (OuterVolumeSpecName: "kube-api-access-tgpx7") pod "cf151a5f-3656-4c41-85ac-f1cdedf67f76" (UID: "cf151a5f-3656-4c41-85ac-f1cdedf67f76"). InnerVolumeSpecName "kube-api-access-tgpx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.211843 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-sz6nt" event={"ID":"7e2f485a-8ed4-4ba6-a35b-9e5788d46e10","Type":"ContainerDied","Data":"f614e485bad4affe2b6eff8a05c15f9776104aa4132f72bbb7edd59f1c6a70f8"} Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.211884 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f614e485bad4affe2b6eff8a05c15f9776104aa4132f72bbb7edd59f1c6a70f8" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.212134 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-sz6nt" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.219149 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.220471 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"74e069c9-8459-4455-b520-fa8ba79bb677","Type":"ContainerStarted","Data":"ce43cfa27fd23885a5282c64e42bfe85febfccb745d3a55b7207d0836bdcd047"} Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.228309 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-457ln" event={"ID":"cf151a5f-3656-4c41-85ac-f1cdedf67f76","Type":"ContainerDied","Data":"c15cc900b598d000708b09c6801a8ad23ccebeeafd814a93c570aa82dd11261c"} Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.228347 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c15cc900b598d000708b09c6801a8ad23ccebeeafd814a93c570aa82dd11261c" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.228324 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-457ln" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.235386 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c21f-account-create-s2ss6" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.235436 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c21f-account-create-s2ss6" event={"ID":"cdab3c6d-f3c2-4eae-a180-5d7fea562148","Type":"ContainerDied","Data":"dcdc777ad4d86949f78e793caff4a7f771343e04bc89cebdbf20be1c2976d072"} Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.235480 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dcdc777ad4d86949f78e793caff4a7f771343e04bc89cebdbf20be1c2976d072" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.243124 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.243102894 podStartE2EDuration="3.243102894s" podCreationTimestamp="2025-11-24 17:21:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:21:29.236911716 +0000 UTC m=+1084.559793266" watchObservedRunningTime="2025-11-24 17:21:29.243102894 +0000 UTC m=+1084.565984444" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.247893 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c8fb97f-21ba-4875-8f92-90eb2639d942","Type":"ContainerStarted","Data":"40c08033e7e6361877f7015fac435adc5df8d5ab0ce84ce30e959bfa7ad8a044"} Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.247948 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c8fb97f-21ba-4875-8f92-90eb2639d942","Type":"ContainerStarted","Data":"3f597afd83c8fdf678eaa9f577fb6105ec12b5879c5e18addd446412c5a6ffbd"} Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.252878 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2801-account-create-79wz9" event={"ID":"66dd0d71-2f7f-485a-9cd7-7b2e84a49a85","Type":"ContainerDied","Data":"457f4840d96bac8056f775e8e5864faadb558db7d1ddc72732ca43bd79716580"} Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.252911 4760 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="457f4840d96bac8056f775e8e5864faadb558db7d1ddc72732ca43bd79716580" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.252973 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2801-account-create-79wz9" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.265667 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-713e-account-create-7lqfc" event={"ID":"4be89ae4-3cce-4eea-b760-0759df25aeaf","Type":"ContainerDied","Data":"41a4ebcfdb522748ccf6bf676fbbeb111b86dacc4fb9faf2bd33601cb32c9a30"} Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.265712 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="41a4ebcfdb522748ccf6bf676fbbeb111b86dacc4fb9faf2bd33601cb32c9a30" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.265715 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-713e-account-create-7lqfc" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.269257 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-sp75g" event={"ID":"f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2","Type":"ContainerDied","Data":"279c6d6d8043e78d1cc2a37bce7d36fb37eff9c2e6ed0714373d15e37d031a1c"} Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.269293 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="279c6d6d8043e78d1cc2a37bce7d36fb37eff9c2e6ed0714373d15e37d031a1c" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.269353 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-sp75g" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.287524 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgpx7\" (UniqueName: \"kubernetes.io/projected/cf151a5f-3656-4c41-85ac-f1cdedf67f76-kube-api-access-tgpx7\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.287544 4760 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf151a5f-3656-4c41-85ac-f1cdedf67f76-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.596341 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8565878c68-g58n7" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.596697 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:21:29 crc kubenswrapper[4760]: I1124 17:21:29.907945 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:30 crc kubenswrapper[4760]: I1124 17:21:30.280998 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8531a189-02f3-4e03-8fca-ff113990ee3e","Type":"ContainerStarted","Data":"8e3afaafa200791a51454ca3f7c5b97e5c8905232c1ad96c5445e0a8194f10a6"} Nov 24 17:21:30 crc kubenswrapper[4760]: I1124 17:21:30.281335 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"8531a189-02f3-4e03-8fca-ff113990ee3e","Type":"ContainerStarted","Data":"76c0cc70d273714cb4a19e3fa4caf0d267c92bcb72eb498b0b861b48e9fbf9eb"} Nov 24 17:21:30 crc kubenswrapper[4760]: I1124 17:21:30.283671 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c8fb97f-21ba-4875-8f92-90eb2639d942","Type":"ContainerStarted","Data":"bc05728e7c3825a606d95c20aa7a1b731f3611cbf71cdf35b28a7a2b50b32328"} Nov 24 17:21:30 crc kubenswrapper[4760]: I1124 17:21:30.285786 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"fd87b6ec-9f3d-41ae-9647-6410620a1f4a","Type":"ContainerStarted","Data":"528008a7692177f695957135d3a98ec2a32503af22560b08918f59953a22de5c"} Nov 24 17:21:30 crc kubenswrapper[4760]: I1124 17:21:30.289076 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"74e069c9-8459-4455-b520-fa8ba79bb677","Type":"ContainerStarted","Data":"6ee2dfb588940bc811dd1bf85cc38c8e1d8dbb8f3009e109888f33066c0b8644"} Nov 24 17:21:30 crc kubenswrapper[4760]: I1124 17:21:30.308379 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.30836096 podStartE2EDuration="3.30836096s" podCreationTimestamp="2025-11-24 17:21:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:21:30.307430574 +0000 UTC m=+1085.630312124" watchObservedRunningTime="2025-11-24 17:21:30.30836096 +0000 UTC m=+1085.631242500" Nov 24 17:21:30 crc kubenswrapper[4760]: I1124 17:21:30.335028 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.334998603 podStartE2EDuration="4.334998603s" podCreationTimestamp="2025-11-24 17:21:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:21:30.329916298 +0000 UTC m=+1085.652797848" watchObservedRunningTime="2025-11-24 17:21:30.334998603 +0000 UTC m=+1085.657880153" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.300704 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="ceilometer-central-agent" containerID="cri-o://3f597afd83c8fdf678eaa9f577fb6105ec12b5879c5e18addd446412c5a6ffbd" gracePeriod=30 Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.301114 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c8fb97f-21ba-4875-8f92-90eb2639d942","Type":"ContainerStarted","Data":"690df8bd91c070610ea14d2e9fba485eba86bc416ceef4df5b98df46adc37bdf"} Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.302283 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.302520 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="proxy-httpd" containerID="cri-o://690df8bd91c070610ea14d2e9fba485eba86bc416ceef4df5b98df46adc37bdf" gracePeriod=30 Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.302582 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="sg-core" containerID="cri-o://bc05728e7c3825a606d95c20aa7a1b731f3611cbf71cdf35b28a7a2b50b32328" gracePeriod=30 Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.302615 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="ceilometer-notification-agent" containerID="cri-o://40c08033e7e6361877f7015fac435adc5df8d5ab0ce84ce30e959bfa7ad8a044" gracePeriod=30 Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.326415 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.231206465 podStartE2EDuration="5.326402306s" podCreationTimestamp="2025-11-24 17:21:26 +0000 UTC" firstStartedPulling="2025-11-24 17:21:27.569073522 +0000 UTC m=+1082.891955072" lastFinishedPulling="2025-11-24 17:21:30.664269363 +0000 UTC m=+1085.987150913" observedRunningTime="2025-11-24 17:21:31.320645251 +0000 UTC m=+1086.643526811" watchObservedRunningTime="2025-11-24 17:21:31.326402306 +0000 UTC m=+1086.649283856" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.839275 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jxp48"] Nov 24 17:21:31 crc kubenswrapper[4760]: E1124 17:21:31.839679 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e2f485a-8ed4-4ba6-a35b-9e5788d46e10" containerName="mariadb-database-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.839703 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e2f485a-8ed4-4ba6-a35b-9e5788d46e10" containerName="mariadb-database-create" Nov 24 17:21:31 crc kubenswrapper[4760]: E1124 17:21:31.839718 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66dd0d71-2f7f-485a-9cd7-7b2e84a49a85" containerName="mariadb-account-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.839727 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="66dd0d71-2f7f-485a-9cd7-7b2e84a49a85" containerName="mariadb-account-create" Nov 24 17:21:31 crc kubenswrapper[4760]: E1124 17:21:31.839741 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf151a5f-3656-4c41-85ac-f1cdedf67f76" containerName="mariadb-database-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.839750 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf151a5f-3656-4c41-85ac-f1cdedf67f76" containerName="mariadb-database-create" Nov 24 17:21:31 crc kubenswrapper[4760]: E1124 17:21:31.839770 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2" containerName="mariadb-database-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.839778 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2" containerName="mariadb-database-create" Nov 24 17:21:31 crc kubenswrapper[4760]: E1124 17:21:31.839788 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdab3c6d-f3c2-4eae-a180-5d7fea562148" containerName="mariadb-account-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.839796 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdab3c6d-f3c2-4eae-a180-5d7fea562148" containerName="mariadb-account-create" Nov 24 17:21:31 crc kubenswrapper[4760]: E1124 17:21:31.839811 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4be89ae4-3cce-4eea-b760-0759df25aeaf" 
containerName="mariadb-account-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.839818 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="4be89ae4-3cce-4eea-b760-0759df25aeaf" containerName="mariadb-account-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.840055 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2" containerName="mariadb-database-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.840079 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdab3c6d-f3c2-4eae-a180-5d7fea562148" containerName="mariadb-account-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.840101 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="4be89ae4-3cce-4eea-b760-0759df25aeaf" containerName="mariadb-account-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.840114 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e2f485a-8ed4-4ba6-a35b-9e5788d46e10" containerName="mariadb-database-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.840126 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf151a5f-3656-4c41-85ac-f1cdedf67f76" containerName="mariadb-database-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.840141 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="66dd0d71-2f7f-485a-9cd7-7b2e84a49a85" containerName="mariadb-account-create" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.840821 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.846075 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.846312 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-p9zv6" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.846517 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.866409 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jxp48"] Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.939825 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-config-data\") pod \"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.939923 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.940857 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7rs7\" (UniqueName: \"kubernetes.io/projected/79b8dc02-4f92-415c-be5c-0822ff170919-kube-api-access-q7rs7\") pod \"nova-cell0-conductor-db-sync-jxp48\" 
(UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:31 crc kubenswrapper[4760]: I1124 17:21:31.941061 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-scripts\") pod \"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.042751 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.043133 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7rs7\" (UniqueName: \"kubernetes.io/projected/79b8dc02-4f92-415c-be5c-0822ff170919-kube-api-access-q7rs7\") pod \"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.043205 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-scripts\") pod \"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.043295 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-config-data\") pod \"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.049052 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.049667 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-config-data\") pod \"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.060716 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-scripts\") pod \"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.063046 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7rs7\" (UniqueName: \"kubernetes.io/projected/79b8dc02-4f92-415c-be5c-0822ff170919-kube-api-access-q7rs7\") pod 
\"nova-cell0-conductor-db-sync-jxp48\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.157786 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.342393 4760 generic.go:334] "Generic (PLEG): container finished" podID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerID="690df8bd91c070610ea14d2e9fba485eba86bc416ceef4df5b98df46adc37bdf" exitCode=0 Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.342630 4760 generic.go:334] "Generic (PLEG): container finished" podID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerID="bc05728e7c3825a606d95c20aa7a1b731f3611cbf71cdf35b28a7a2b50b32328" exitCode=2 Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.342639 4760 generic.go:334] "Generic (PLEG): container finished" podID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerID="40c08033e7e6361877f7015fac435adc5df8d5ab0ce84ce30e959bfa7ad8a044" exitCode=0 Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.342508 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c8fb97f-21ba-4875-8f92-90eb2639d942","Type":"ContainerDied","Data":"690df8bd91c070610ea14d2e9fba485eba86bc416ceef4df5b98df46adc37bdf"} Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.342675 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c8fb97f-21ba-4875-8f92-90eb2639d942","Type":"ContainerDied","Data":"bc05728e7c3825a606d95c20aa7a1b731f3611cbf71cdf35b28a7a2b50b32328"} Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.342689 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c8fb97f-21ba-4875-8f92-90eb2639d942","Type":"ContainerDied","Data":"40c08033e7e6361877f7015fac435adc5df8d5ab0ce84ce30e959bfa7ad8a044"} Nov 24 17:21:32 crc kubenswrapper[4760]: I1124 17:21:32.414294 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jxp48"] Nov 24 17:21:32 crc kubenswrapper[4760]: W1124 17:21:32.424549 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod79b8dc02_4f92_415c_be5c_0822ff170919.slice/crio-a19406a80009d1a1c2286faec2de578d7761364c386384db057cbdca051c57da WatchSource:0}: Error finding container a19406a80009d1a1c2286faec2de578d7761364c386384db057cbdca051c57da: Status 404 returned error can't find the container with id a19406a80009d1a1c2286faec2de578d7761364c386384db057cbdca051c57da Nov 24 17:21:33 crc kubenswrapper[4760]: E1124 17:21:33.295989 4760 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb140b79a_3f9b_4909_bf34_2be905ddf6b0.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3003350b_62f1_4eb7_b044_bc0e8b007ef5.slice\": RecentStats: unable to find data in memory cache]" Nov 24 17:21:33 crc kubenswrapper[4760]: I1124 17:21:33.367181 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jxp48" event={"ID":"79b8dc02-4f92-415c-be5c-0822ff170919","Type":"ContainerStarted","Data":"a19406a80009d1a1c2286faec2de578d7761364c386384db057cbdca051c57da"} Nov 24 17:21:34 crc 
kubenswrapper[4760]: I1124 17:21:34.122253 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.184882 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7xhx\" (UniqueName: \"kubernetes.io/projected/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-kube-api-access-g7xhx\") pod \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.185228 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-tls-certs\") pod \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.185272 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-secret-key\") pod \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.185359 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-scripts\") pod \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.185444 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-logs\") pod \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.185472 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-combined-ca-bundle\") pod \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.185518 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-config-data\") pod \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\" (UID: \"b2a0d3e8-0ad1-4397-abb3-0b0074b13103\") " Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.186272 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-logs" (OuterVolumeSpecName: "logs") pod "b2a0d3e8-0ad1-4397-abb3-0b0074b13103" (UID: "b2a0d3e8-0ad1-4397-abb3-0b0074b13103"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.192609 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-kube-api-access-g7xhx" (OuterVolumeSpecName: "kube-api-access-g7xhx") pod "b2a0d3e8-0ad1-4397-abb3-0b0074b13103" (UID: "b2a0d3e8-0ad1-4397-abb3-0b0074b13103"). InnerVolumeSpecName "kube-api-access-g7xhx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.199144 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "b2a0d3e8-0ad1-4397-abb3-0b0074b13103" (UID: "b2a0d3e8-0ad1-4397-abb3-0b0074b13103"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.225299 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-config-data" (OuterVolumeSpecName: "config-data") pod "b2a0d3e8-0ad1-4397-abb3-0b0074b13103" (UID: "b2a0d3e8-0ad1-4397-abb3-0b0074b13103"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.228523 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2a0d3e8-0ad1-4397-abb3-0b0074b13103" (UID: "b2a0d3e8-0ad1-4397-abb3-0b0074b13103"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.235900 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-scripts" (OuterVolumeSpecName: "scripts") pod "b2a0d3e8-0ad1-4397-abb3-0b0074b13103" (UID: "b2a0d3e8-0ad1-4397-abb3-0b0074b13103"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.288926 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.288967 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.288983 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.288995 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7xhx\" (UniqueName: \"kubernetes.io/projected/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-kube-api-access-g7xhx\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.289023 4760 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.289034 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.295588 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "b2a0d3e8-0ad1-4397-abb3-0b0074b13103" (UID: "b2a0d3e8-0ad1-4397-abb3-0b0074b13103"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.381278 4760 generic.go:334] "Generic (PLEG): container finished" podID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerID="00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d" exitCode=137 Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.381321 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8565878c68-g58n7" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.381328 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8565878c68-g58n7" event={"ID":"b2a0d3e8-0ad1-4397-abb3-0b0074b13103","Type":"ContainerDied","Data":"00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d"} Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.381413 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8565878c68-g58n7" event={"ID":"b2a0d3e8-0ad1-4397-abb3-0b0074b13103","Type":"ContainerDied","Data":"e66aa640f083e6e01d4f2f0f170fd0fe6182d62849fd524299b8020c407e60d5"} Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.381460 4760 scope.go:117] "RemoveContainer" containerID="88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.390622 4760 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2a0d3e8-0ad1-4397-abb3-0b0074b13103-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.420850 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8565878c68-g58n7"] Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.426924 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-8565878c68-g58n7"] Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.580129 4760 scope.go:117] "RemoveContainer" containerID="00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.602636 4760 scope.go:117] "RemoveContainer" containerID="88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4" Nov 24 17:21:34 crc kubenswrapper[4760]: E1124 17:21:34.602961 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4\": container with ID starting with 88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4 not found: ID does not exist" containerID="88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.602994 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4"} err="failed to get container status \"88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4\": rpc error: code = NotFound desc = could not find container \"88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4\": container with ID starting with 88b091bc8064b135ac299cd5761704d0ff2338591cf1cef327f959da55f662f4 not found: ID does not exist" Nov 
24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.603031 4760 scope.go:117] "RemoveContainer" containerID="00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d" Nov 24 17:21:34 crc kubenswrapper[4760]: E1124 17:21:34.603761 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d\": container with ID starting with 00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d not found: ID does not exist" containerID="00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d" Nov 24 17:21:34 crc kubenswrapper[4760]: I1124 17:21:34.603802 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d"} err="failed to get container status \"00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d\": rpc error: code = NotFound desc = could not find container \"00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d\": container with ID starting with 00b884116993533e24122a27a321555a615f56844bd1a5a80dbe19688491356d not found: ID does not exist" Nov 24 17:21:35 crc kubenswrapper[4760]: I1124 17:21:35.484029 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" path="/var/lib/kubelet/pods/b2a0d3e8-0ad1-4397-abb3-0b0074b13103/volumes" Nov 24 17:21:36 crc kubenswrapper[4760]: I1124 17:21:36.809224 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:36 crc kubenswrapper[4760]: I1124 17:21:36.809637 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:36 crc kubenswrapper[4760]: I1124 17:21:36.841440 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:36 crc kubenswrapper[4760]: I1124 17:21:36.855942 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:37 crc kubenswrapper[4760]: I1124 17:21:37.407613 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:37 crc kubenswrapper[4760]: I1124 17:21:37.407819 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:37 crc kubenswrapper[4760]: I1124 17:21:37.632047 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 24 17:21:37 crc kubenswrapper[4760]: I1124 17:21:37.632091 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 24 17:21:37 crc kubenswrapper[4760]: I1124 17:21:37.667307 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 24 17:21:37 crc kubenswrapper[4760]: I1124 17:21:37.683054 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 24 17:21:38 crc kubenswrapper[4760]: I1124 17:21:38.419210 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 24 17:21:38 crc kubenswrapper[4760]: I1124 17:21:38.419588 
4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 24 17:21:38 crc kubenswrapper[4760]: I1124 17:21:38.770188 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 24 17:21:39 crc kubenswrapper[4760]: I1124 17:21:39.426908 4760 generic.go:334] "Generic (PLEG): container finished" podID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerID="3f597afd83c8fdf678eaa9f577fb6105ec12b5879c5e18addd446412c5a6ffbd" exitCode=0 Nov 24 17:21:39 crc kubenswrapper[4760]: I1124 17:21:39.427851 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c8fb97f-21ba-4875-8f92-90eb2639d942","Type":"ContainerDied","Data":"3f597afd83c8fdf678eaa9f577fb6105ec12b5879c5e18addd446412c5a6ffbd"} Nov 24 17:21:39 crc kubenswrapper[4760]: I1124 17:21:39.427887 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:21:39 crc kubenswrapper[4760]: I1124 17:21:39.427903 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:21:39 crc kubenswrapper[4760]: I1124 17:21:39.491763 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:39 crc kubenswrapper[4760]: I1124 17:21:39.546617 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 24 17:21:40 crc kubenswrapper[4760]: I1124 17:21:40.670137 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 24 17:21:40 crc kubenswrapper[4760]: I1124 17:21:40.670235 4760 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 17:21:40 crc kubenswrapper[4760]: I1124 17:21:40.679855 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.426653 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.494219 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c8fb97f-21ba-4875-8f92-90eb2639d942","Type":"ContainerDied","Data":"bbcc8356b44f98c3f148bec5a43ae5bcafa6245f0ee851cc3b3cfc5eaf991a6d"} Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.495122 4760 scope.go:117] "RemoveContainer" containerID="690df8bd91c070610ea14d2e9fba485eba86bc416ceef4df5b98df46adc37bdf" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.494686 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.526438 4760 scope.go:117] "RemoveContainer" containerID="bc05728e7c3825a606d95c20aa7a1b731f3611cbf71cdf35b28a7a2b50b32328" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.528189 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-scripts\") pod \"1c8fb97f-21ba-4875-8f92-90eb2639d942\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.528280 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-sg-core-conf-yaml\") pod \"1c8fb97f-21ba-4875-8f92-90eb2639d942\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.528301 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-config-data\") pod \"1c8fb97f-21ba-4875-8f92-90eb2639d942\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.528428 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-combined-ca-bundle\") pod \"1c8fb97f-21ba-4875-8f92-90eb2639d942\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.528450 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-log-httpd\") pod \"1c8fb97f-21ba-4875-8f92-90eb2639d942\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.528490 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-run-httpd\") pod \"1c8fb97f-21ba-4875-8f92-90eb2639d942\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.528546 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnckx\" (UniqueName: \"kubernetes.io/projected/1c8fb97f-21ba-4875-8f92-90eb2639d942-kube-api-access-dnckx\") pod \"1c8fb97f-21ba-4875-8f92-90eb2639d942\" (UID: \"1c8fb97f-21ba-4875-8f92-90eb2639d942\") " Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.529573 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1c8fb97f-21ba-4875-8f92-90eb2639d942" (UID: "1c8fb97f-21ba-4875-8f92-90eb2639d942"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.529652 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1c8fb97f-21ba-4875-8f92-90eb2639d942" (UID: "1c8fb97f-21ba-4875-8f92-90eb2639d942"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.535625 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c8fb97f-21ba-4875-8f92-90eb2639d942-kube-api-access-dnckx" (OuterVolumeSpecName: "kube-api-access-dnckx") pod "1c8fb97f-21ba-4875-8f92-90eb2639d942" (UID: "1c8fb97f-21ba-4875-8f92-90eb2639d942"). InnerVolumeSpecName "kube-api-access-dnckx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.544179 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-scripts" (OuterVolumeSpecName: "scripts") pod "1c8fb97f-21ba-4875-8f92-90eb2639d942" (UID: "1c8fb97f-21ba-4875-8f92-90eb2639d942"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.575780 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1c8fb97f-21ba-4875-8f92-90eb2639d942" (UID: "1c8fb97f-21ba-4875-8f92-90eb2639d942"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.630620 4760 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.630792 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnckx\" (UniqueName: \"kubernetes.io/projected/1c8fb97f-21ba-4875-8f92-90eb2639d942-kube-api-access-dnckx\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.630878 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.630979 4760 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.631119 4760 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c8fb97f-21ba-4875-8f92-90eb2639d942-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.646742 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c8fb97f-21ba-4875-8f92-90eb2639d942" (UID: "1c8fb97f-21ba-4875-8f92-90eb2639d942"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.668916 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-config-data" (OuterVolumeSpecName: "config-data") pod "1c8fb97f-21ba-4875-8f92-90eb2639d942" (UID: "1c8fb97f-21ba-4875-8f92-90eb2639d942"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.732273 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.732312 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8fb97f-21ba-4875-8f92-90eb2639d942-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.746355 4760 scope.go:117] "RemoveContainer" containerID="40c08033e7e6361877f7015fac435adc5df8d5ab0ce84ce30e959bfa7ad8a044" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.774950 4760 scope.go:117] "RemoveContainer" containerID="3f597afd83c8fdf678eaa9f577fb6105ec12b5879c5e18addd446412c5a6ffbd" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.825916 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.834778 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.862380 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:42 crc kubenswrapper[4760]: E1124 17:21:42.862849 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="proxy-httpd" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.862870 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="proxy-httpd" Nov 24 17:21:42 crc kubenswrapper[4760]: E1124 17:21:42.862891 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="ceilometer-central-agent" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.862903 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="ceilometer-central-agent" Nov 24 17:21:42 crc kubenswrapper[4760]: E1124 17:21:42.862917 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.862925 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon" Nov 24 17:21:42 crc kubenswrapper[4760]: E1124 17:21:42.862948 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="ceilometer-notification-agent" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.862956 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="ceilometer-notification-agent" Nov 24 17:21:42 crc kubenswrapper[4760]: E1124 17:21:42.862976 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="sg-core" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.862985 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="sg-core" Nov 24 17:21:42 crc kubenswrapper[4760]: E1124 17:21:42.863021 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" 
containerName="horizon-log" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.863029 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon-log" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.863246 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="ceilometer-central-agent" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.863261 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="ceilometer-notification-agent" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.863275 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon-log" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.863297 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="proxy-httpd" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.863311 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2a0d3e8-0ad1-4397-abb3-0b0074b13103" containerName="horizon" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.863328 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" containerName="sg-core" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.865741 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.868800 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.878776 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.879205 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.935174 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-log-httpd\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.935227 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.935257 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvs8f\" (UniqueName: \"kubernetes.io/projected/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-kube-api-access-jvs8f\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.935277 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-scripts\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " 
pod="openstack/ceilometer-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.935311 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-run-httpd\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.935329 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.935386 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-config-data\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:42 crc kubenswrapper[4760]: I1124 17:21:42.951621 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:42 crc kubenswrapper[4760]: E1124 17:21:42.952627 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-jvs8f log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="ff24f118-8c39-4ee1-abd1-94c9b09b0f22" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.036859 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-config-data\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.037037 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-log-httpd\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.037113 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.037178 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvs8f\" (UniqueName: \"kubernetes.io/projected/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-kube-api-access-jvs8f\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.037211 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-scripts\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.037293 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-run-httpd\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.037347 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.037628 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-log-httpd\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.038303 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-run-httpd\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.041075 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.041140 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.041270 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-config-data\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.051888 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-scripts\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.062456 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvs8f\" (UniqueName: \"kubernetes.io/projected/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-kube-api-access-jvs8f\") pod \"ceilometer-0\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.477240 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c8fb97f-21ba-4875-8f92-90eb2639d942" path="/var/lib/kubelet/pods/1c8fb97f-21ba-4875-8f92-90eb2639d942/volumes" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.504999 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.516825 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:43 crc kubenswrapper[4760]: E1124 17:21:43.560379 4760 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3003350b_62f1_4eb7_b044_bc0e8b007ef5.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb140b79a_3f9b_4909_bf34_2be905ddf6b0.slice\": RecentStats: unable to find data in memory cache]" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.647578 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-sg-core-conf-yaml\") pod \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.647827 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvs8f\" (UniqueName: \"kubernetes.io/projected/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-kube-api-access-jvs8f\") pod \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.647906 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-log-httpd\") pod \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.647970 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-scripts\") pod \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.648076 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-config-data\") pod \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.648323 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-run-httpd\") pod \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.648402 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-combined-ca-bundle\") pod \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\" (UID: \"ff24f118-8c39-4ee1-abd1-94c9b09b0f22\") " Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.648479 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ff24f118-8c39-4ee1-abd1-94c9b09b0f22" (UID: "ff24f118-8c39-4ee1-abd1-94c9b09b0f22"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.648866 4760 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.650116 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ff24f118-8c39-4ee1-abd1-94c9b09b0f22" (UID: "ff24f118-8c39-4ee1-abd1-94c9b09b0f22"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.657648 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-scripts" (OuterVolumeSpecName: "scripts") pod "ff24f118-8c39-4ee1-abd1-94c9b09b0f22" (UID: "ff24f118-8c39-4ee1-abd1-94c9b09b0f22"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.657728 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff24f118-8c39-4ee1-abd1-94c9b09b0f22" (UID: "ff24f118-8c39-4ee1-abd1-94c9b09b0f22"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.658190 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-kube-api-access-jvs8f" (OuterVolumeSpecName: "kube-api-access-jvs8f") pod "ff24f118-8c39-4ee1-abd1-94c9b09b0f22" (UID: "ff24f118-8c39-4ee1-abd1-94c9b09b0f22"). InnerVolumeSpecName "kube-api-access-jvs8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.664203 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ff24f118-8c39-4ee1-abd1-94c9b09b0f22" (UID: "ff24f118-8c39-4ee1-abd1-94c9b09b0f22"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.665088 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-config-data" (OuterVolumeSpecName: "config-data") pod "ff24f118-8c39-4ee1-abd1-94c9b09b0f22" (UID: "ff24f118-8c39-4ee1-abd1-94c9b09b0f22"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.750995 4760 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.751043 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.751054 4760 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.751063 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvs8f\" (UniqueName: \"kubernetes.io/projected/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-kube-api-access-jvs8f\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.751074 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:43 crc kubenswrapper[4760]: I1124 17:21:43.751081 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff24f118-8c39-4ee1-abd1-94c9b09b0f22-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.513441 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.658129 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.667642 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.681666 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.683938 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.686902 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.687146 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.689863 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.768095 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.768177 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-log-httpd\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.768376 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.768504 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-config-data\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.768655 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-scripts\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.768786 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-run-httpd\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.768930 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qqmc\" (UniqueName: \"kubernetes.io/projected/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-kube-api-access-5qqmc\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.870956 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-scripts\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.871080 4760 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-run-httpd\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.871128 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qqmc\" (UniqueName: \"kubernetes.io/projected/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-kube-api-access-5qqmc\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.871171 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.871208 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-log-httpd\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.871235 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.871265 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-config-data\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.872029 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-run-httpd\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.872283 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-log-httpd\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.876755 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.877142 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-config-data\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.877567 4760 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-scripts\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.880274 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:44 crc kubenswrapper[4760]: I1124 17:21:44.892316 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qqmc\" (UniqueName: \"kubernetes.io/projected/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-kube-api-access-5qqmc\") pod \"ceilometer-0\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " pod="openstack/ceilometer-0" Nov 24 17:21:45 crc kubenswrapper[4760]: I1124 17:21:45.001670 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:45 crc kubenswrapper[4760]: W1124 17:21:45.482860 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad6b5ef6_edf5_40a5_a9ea_d73a4555ed4a.slice/crio-e659d8d06f6b41e66fee4288ead689c39ba1f2fc7210ef6d3c4ff638e62455e4 WatchSource:0}: Error finding container e659d8d06f6b41e66fee4288ead689c39ba1f2fc7210ef6d3c4ff638e62455e4: Status 404 returned error can't find the container with id e659d8d06f6b41e66fee4288ead689c39ba1f2fc7210ef6d3c4ff638e62455e4 Nov 24 17:21:45 crc kubenswrapper[4760]: I1124 17:21:45.484296 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff24f118-8c39-4ee1-abd1-94c9b09b0f22" path="/var/lib/kubelet/pods/ff24f118-8c39-4ee1-abd1-94c9b09b0f22/volumes" Nov 24 17:21:45 crc kubenswrapper[4760]: I1124 17:21:45.484637 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:45 crc kubenswrapper[4760]: I1124 17:21:45.527911 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jxp48" event={"ID":"79b8dc02-4f92-415c-be5c-0822ff170919","Type":"ContainerStarted","Data":"5367d58febd30efee2c861a698ae89894372b4386b462351523fa207707232f4"} Nov 24 17:21:45 crc kubenswrapper[4760]: I1124 17:21:45.531189 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a","Type":"ContainerStarted","Data":"e659d8d06f6b41e66fee4288ead689c39ba1f2fc7210ef6d3c4ff638e62455e4"} Nov 24 17:21:45 crc kubenswrapper[4760]: I1124 17:21:45.552855 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-jxp48" podStartSLOduration=2.501590071 podStartE2EDuration="14.55283676s" podCreationTimestamp="2025-11-24 17:21:31 +0000 UTC" firstStartedPulling="2025-11-24 17:21:32.426888202 +0000 UTC m=+1087.749769752" lastFinishedPulling="2025-11-24 17:21:44.478134891 +0000 UTC m=+1099.801016441" observedRunningTime="2025-11-24 17:21:45.543657917 +0000 UTC m=+1100.866539467" watchObservedRunningTime="2025-11-24 17:21:45.55283676 +0000 UTC m=+1100.875718310" Nov 24 17:21:46 crc kubenswrapper[4760]: I1124 17:21:46.541582 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a","Type":"ContainerStarted","Data":"78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39"} Nov 24 17:21:47 crc kubenswrapper[4760]: I1124 17:21:47.551262 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a","Type":"ContainerStarted","Data":"11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b"} Nov 24 17:21:48 crc kubenswrapper[4760]: I1124 17:21:48.570602 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a","Type":"ContainerStarted","Data":"ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a"} Nov 24 17:21:49 crc kubenswrapper[4760]: I1124 17:21:49.590058 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a","Type":"ContainerStarted","Data":"3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b"} Nov 24 17:21:49 crc kubenswrapper[4760]: I1124 17:21:49.590633 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 17:21:49 crc kubenswrapper[4760]: I1124 17:21:49.623343 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.505237201 podStartE2EDuration="5.623318107s" podCreationTimestamp="2025-11-24 17:21:44 +0000 UTC" firstStartedPulling="2025-11-24 17:21:45.48406899 +0000 UTC m=+1100.806950540" lastFinishedPulling="2025-11-24 17:21:48.602149896 +0000 UTC m=+1103.925031446" observedRunningTime="2025-11-24 17:21:49.609952844 +0000 UTC m=+1104.932834414" watchObservedRunningTime="2025-11-24 17:21:49.623318107 +0000 UTC m=+1104.946199687" Nov 24 17:21:53 crc kubenswrapper[4760]: E1124 17:21:53.810497 4760 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb140b79a_3f9b_4909_bf34_2be905ddf6b0.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3003350b_62f1_4eb7_b044_bc0e8b007ef5.slice\": RecentStats: unable to find data in memory cache]" Nov 24 17:21:54 crc kubenswrapper[4760]: I1124 17:21:54.651572 4760 generic.go:334] "Generic (PLEG): container finished" podID="79b8dc02-4f92-415c-be5c-0822ff170919" containerID="5367d58febd30efee2c861a698ae89894372b4386b462351523fa207707232f4" exitCode=0 Nov 24 17:21:54 crc kubenswrapper[4760]: I1124 17:21:54.651619 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jxp48" event={"ID":"79b8dc02-4f92-415c-be5c-0822ff170919","Type":"ContainerDied","Data":"5367d58febd30efee2c861a698ae89894372b4386b462351523fa207707232f4"} Nov 24 17:21:54 crc kubenswrapper[4760]: I1124 17:21:54.785539 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:54 crc kubenswrapper[4760]: I1124 17:21:54.785913 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="ceilometer-central-agent" containerID="cri-o://78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39" gracePeriod=30 Nov 24 17:21:54 crc kubenswrapper[4760]: I1124 17:21:54.786024 4760 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/ceilometer-0" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="proxy-httpd" containerID="cri-o://3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b" gracePeriod=30 Nov 24 17:21:54 crc kubenswrapper[4760]: I1124 17:21:54.786034 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="sg-core" containerID="cri-o://ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a" gracePeriod=30 Nov 24 17:21:54 crc kubenswrapper[4760]: I1124 17:21:54.786048 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="ceilometer-notification-agent" containerID="cri-o://11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b" gracePeriod=30 Nov 24 17:21:55 crc kubenswrapper[4760]: I1124 17:21:55.661661 4760 generic.go:334] "Generic (PLEG): container finished" podID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerID="3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b" exitCode=0 Nov 24 17:21:55 crc kubenswrapper[4760]: I1124 17:21:55.662100 4760 generic.go:334] "Generic (PLEG): container finished" podID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerID="ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a" exitCode=2 Nov 24 17:21:55 crc kubenswrapper[4760]: I1124 17:21:55.661744 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a","Type":"ContainerDied","Data":"3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b"} Nov 24 17:21:55 crc kubenswrapper[4760]: I1124 17:21:55.662153 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a","Type":"ContainerDied","Data":"ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a"} Nov 24 17:21:55 crc kubenswrapper[4760]: I1124 17:21:55.662172 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a","Type":"ContainerDied","Data":"78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39"} Nov 24 17:21:55 crc kubenswrapper[4760]: I1124 17:21:55.662115 4760 generic.go:334] "Generic (PLEG): container finished" podID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerID="78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39" exitCode=0 Nov 24 17:21:55 crc kubenswrapper[4760]: I1124 17:21:55.995207 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.082365 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7rs7\" (UniqueName: \"kubernetes.io/projected/79b8dc02-4f92-415c-be5c-0822ff170919-kube-api-access-q7rs7\") pod \"79b8dc02-4f92-415c-be5c-0822ff170919\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.082427 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-scripts\") pod \"79b8dc02-4f92-415c-be5c-0822ff170919\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.082505 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-config-data\") pod \"79b8dc02-4f92-415c-be5c-0822ff170919\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.082541 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-combined-ca-bundle\") pod \"79b8dc02-4f92-415c-be5c-0822ff170919\" (UID: \"79b8dc02-4f92-415c-be5c-0822ff170919\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.088047 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-scripts" (OuterVolumeSpecName: "scripts") pod "79b8dc02-4f92-415c-be5c-0822ff170919" (UID: "79b8dc02-4f92-415c-be5c-0822ff170919"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.088261 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79b8dc02-4f92-415c-be5c-0822ff170919-kube-api-access-q7rs7" (OuterVolumeSpecName: "kube-api-access-q7rs7") pod "79b8dc02-4f92-415c-be5c-0822ff170919" (UID: "79b8dc02-4f92-415c-be5c-0822ff170919"). InnerVolumeSpecName "kube-api-access-q7rs7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.113253 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-config-data" (OuterVolumeSpecName: "config-data") pod "79b8dc02-4f92-415c-be5c-0822ff170919" (UID: "79b8dc02-4f92-415c-be5c-0822ff170919"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.126998 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "79b8dc02-4f92-415c-be5c-0822ff170919" (UID: "79b8dc02-4f92-415c-be5c-0822ff170919"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.184112 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.184140 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.184150 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7rs7\" (UniqueName: \"kubernetes.io/projected/79b8dc02-4f92-415c-be5c-0822ff170919-kube-api-access-q7rs7\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.184159 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b8dc02-4f92-415c-be5c-0822ff170919-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.464856 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.490068 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-combined-ca-bundle\") pod \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.490129 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-config-data\") pod \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.490165 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-scripts\") pod \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.490211 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-run-httpd\") pod \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.490265 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qqmc\" (UniqueName: \"kubernetes.io/projected/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-kube-api-access-5qqmc\") pod \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.490336 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-sg-core-conf-yaml\") pod \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.490385 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-log-httpd\") pod \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\" (UID: \"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a\") " Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.493603 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" (UID: "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.500622 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" (UID: "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.503294 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-scripts" (OuterVolumeSpecName: "scripts") pod "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" (UID: "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.508297 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-kube-api-access-5qqmc" (OuterVolumeSpecName: "kube-api-access-5qqmc") pod "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" (UID: "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a"). InnerVolumeSpecName "kube-api-access-5qqmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.536442 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" (UID: "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.571413 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" (UID: "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.593346 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.593379 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.593388 4760 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.593397 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qqmc\" (UniqueName: \"kubernetes.io/projected/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-kube-api-access-5qqmc\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.593407 4760 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.593415 4760 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.626944 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-config-data" (OuterVolumeSpecName: "config-data") pod "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" (UID: "ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.671580 4760 generic.go:334] "Generic (PLEG): container finished" podID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerID="11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b" exitCode=0 Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.671649 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a","Type":"ContainerDied","Data":"11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b"} Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.671680 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a","Type":"ContainerDied","Data":"e659d8d06f6b41e66fee4288ead689c39ba1f2fc7210ef6d3c4ff638e62455e4"} Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.671702 4760 scope.go:117] "RemoveContainer" containerID="3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.671852 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.677577 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jxp48" event={"ID":"79b8dc02-4f92-415c-be5c-0822ff170919","Type":"ContainerDied","Data":"a19406a80009d1a1c2286faec2de578d7761364c386384db057cbdca051c57da"} Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.677606 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a19406a80009d1a1c2286faec2de578d7761364c386384db057cbdca051c57da" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.677655 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jxp48" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.694796 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.707070 4760 scope.go:117] "RemoveContainer" containerID="ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.718441 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.726915 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.737707 4760 scope.go:117] "RemoveContainer" containerID="11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.744528 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:56 crc kubenswrapper[4760]: E1124 17:21:56.744950 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b8dc02-4f92-415c-be5c-0822ff170919" containerName="nova-cell0-conductor-db-sync" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.744964 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b8dc02-4f92-415c-be5c-0822ff170919" containerName="nova-cell0-conductor-db-sync" Nov 24 17:21:56 crc kubenswrapper[4760]: E1124 17:21:56.744980 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="ceilometer-central-agent" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.744988 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="ceilometer-central-agent" Nov 24 17:21:56 crc kubenswrapper[4760]: E1124 17:21:56.745103 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="ceilometer-notification-agent" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.745114 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="ceilometer-notification-agent" Nov 24 17:21:56 crc kubenswrapper[4760]: E1124 17:21:56.745138 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="sg-core" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.745147 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="sg-core" Nov 24 17:21:56 crc kubenswrapper[4760]: E1124 17:21:56.745173 
4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="proxy-httpd" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.745180 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="proxy-httpd" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.745395 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="proxy-httpd" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.745419 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="79b8dc02-4f92-415c-be5c-0822ff170919" containerName="nova-cell0-conductor-db-sync" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.745430 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="ceilometer-notification-agent" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.745448 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="ceilometer-central-agent" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.745461 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" containerName="sg-core" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.747637 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.751864 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.752065 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.780835 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.795905 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.797473 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.799599 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-scripts\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.799646 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.799672 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-run-httpd\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.799748 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-config-data\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.799787 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.799810 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8jzr\" (UniqueName: \"kubernetes.io/projected/69c66cac-178f-40ee-99c8-fe71ced5126c-kube-api-access-h8jzr\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.799864 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-log-httpd\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.800498 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-p9zv6" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.800687 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.809537 4760 scope.go:117] "RemoveContainer" containerID="78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.826131 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.855240 4760 scope.go:117] "RemoveContainer" containerID="3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b" Nov 24 17:21:56 crc 
kubenswrapper[4760]: E1124 17:21:56.857525 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b\": container with ID starting with 3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b not found: ID does not exist" containerID="3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.857573 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b"} err="failed to get container status \"3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b\": rpc error: code = NotFound desc = could not find container \"3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b\": container with ID starting with 3090c839bf1b088d47118c138e47951ae1006e4622284a8c0c78416b616ab84b not found: ID does not exist" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.857593 4760 scope.go:117] "RemoveContainer" containerID="ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a" Nov 24 17:21:56 crc kubenswrapper[4760]: E1124 17:21:56.857798 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a\": container with ID starting with ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a not found: ID does not exist" containerID="ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.857814 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a"} err="failed to get container status \"ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a\": rpc error: code = NotFound desc = could not find container \"ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a\": container with ID starting with ce25ee5db4242125333c5f0e9122e5c7639995b84d45c2e9bbe221e18891578a not found: ID does not exist" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.857828 4760 scope.go:117] "RemoveContainer" containerID="11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b" Nov 24 17:21:56 crc kubenswrapper[4760]: E1124 17:21:56.858031 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b\": container with ID starting with 11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b not found: ID does not exist" containerID="11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.858046 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b"} err="failed to get container status \"11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b\": rpc error: code = NotFound desc = could not find container \"11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b\": container with ID starting with 11c91030207cf68a29b20ff3c5eac2af48d46ec7416db950495b7aa54d2bd04b not found: ID does not exist" Nov 24 17:21:56 crc kubenswrapper[4760]: 
I1124 17:21:56.858058 4760 scope.go:117] "RemoveContainer" containerID="78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39" Nov 24 17:21:56 crc kubenswrapper[4760]: E1124 17:21:56.858214 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39\": container with ID starting with 78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39 not found: ID does not exist" containerID="78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.858230 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39"} err="failed to get container status \"78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39\": rpc error: code = NotFound desc = could not find container \"78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39\": container with ID starting with 78260a0ad633ff1a34e246e2ac2a77622f44795324853fbecaca4f43a441df39 not found: ID does not exist" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.901841 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-scripts\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.901903 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.901933 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b9106d-4aee-4439-9c7d-41c1f015fd02-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d5b9106d-4aee-4439-9c7d-41c1f015fd02\") " pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.901955 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-run-httpd\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.902204 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-config-data\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.902267 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b9106d-4aee-4439-9c7d-41c1f015fd02-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d5b9106d-4aee-4439-9c7d-41c1f015fd02\") " pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.902331 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.902356 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8jzr\" (UniqueName: \"kubernetes.io/projected/69c66cac-178f-40ee-99c8-fe71ced5126c-kube-api-access-h8jzr\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.902486 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-log-httpd\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.902495 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-run-httpd\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.902588 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btbdn\" (UniqueName: \"kubernetes.io/projected/d5b9106d-4aee-4439-9c7d-41c1f015fd02-kube-api-access-btbdn\") pod \"nova-cell0-conductor-0\" (UID: \"d5b9106d-4aee-4439-9c7d-41c1f015fd02\") " pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.902810 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-log-httpd\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.907055 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-scripts\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.907652 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.907670 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-config-data\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.908775 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:56 crc kubenswrapper[4760]: I1124 17:21:56.933748 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8jzr\" (UniqueName: 
\"kubernetes.io/projected/69c66cac-178f-40ee-99c8-fe71ced5126c-kube-api-access-h8jzr\") pod \"ceilometer-0\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " pod="openstack/ceilometer-0" Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.006081 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b9106d-4aee-4439-9c7d-41c1f015fd02-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d5b9106d-4aee-4439-9c7d-41c1f015fd02\") " pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.006560 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btbdn\" (UniqueName: \"kubernetes.io/projected/d5b9106d-4aee-4439-9c7d-41c1f015fd02-kube-api-access-btbdn\") pod \"nova-cell0-conductor-0\" (UID: \"d5b9106d-4aee-4439-9c7d-41c1f015fd02\") " pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.006634 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b9106d-4aee-4439-9c7d-41c1f015fd02-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d5b9106d-4aee-4439-9c7d-41c1f015fd02\") " pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.009997 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b9106d-4aee-4439-9c7d-41c1f015fd02-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d5b9106d-4aee-4439-9c7d-41c1f015fd02\") " pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.010192 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b9106d-4aee-4439-9c7d-41c1f015fd02-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d5b9106d-4aee-4439-9c7d-41c1f015fd02\") " pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.023704 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btbdn\" (UniqueName: \"kubernetes.io/projected/d5b9106d-4aee-4439-9c7d-41c1f015fd02-kube-api-access-btbdn\") pod \"nova-cell0-conductor-0\" (UID: \"d5b9106d-4aee-4439-9c7d-41c1f015fd02\") " pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.089475 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.127296 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.478390 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a" path="/var/lib/kubelet/pods/ad6b5ef6-edf5-40a5-a9ea-d73a4555ed4a/volumes" Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.536689 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.643704 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.689777 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d5b9106d-4aee-4439-9c7d-41c1f015fd02","Type":"ContainerStarted","Data":"54af774f1086eaffbb082b3fcd137f52e5d07e135dd6c9460dec5f8b038f4145"} Nov 24 17:21:57 crc kubenswrapper[4760]: I1124 17:21:57.691995 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69c66cac-178f-40ee-99c8-fe71ced5126c","Type":"ContainerStarted","Data":"7a0eef93cc92fd4bf636007c627a6e8257dbf8715fcd48473b3901e69afa6eb7"} Nov 24 17:21:58 crc kubenswrapper[4760]: I1124 17:21:58.719354 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d5b9106d-4aee-4439-9c7d-41c1f015fd02","Type":"ContainerStarted","Data":"ded068df4c3e343f201b047deed08a55ae54385af2f595ba7e51ef39305f1eb9"} Nov 24 17:21:58 crc kubenswrapper[4760]: I1124 17:21:58.720073 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 24 17:21:58 crc kubenswrapper[4760]: I1124 17:21:58.724628 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69c66cac-178f-40ee-99c8-fe71ced5126c","Type":"ContainerStarted","Data":"2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf"} Nov 24 17:21:58 crc kubenswrapper[4760]: I1124 17:21:58.743911 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.7438636130000003 podStartE2EDuration="2.743863613s" podCreationTimestamp="2025-11-24 17:21:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:21:58.738577811 +0000 UTC m=+1114.061459361" watchObservedRunningTime="2025-11-24 17:21:58.743863613 +0000 UTC m=+1114.066745173" Nov 24 17:21:59 crc kubenswrapper[4760]: I1124 17:21:59.739164 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69c66cac-178f-40ee-99c8-fe71ced5126c","Type":"ContainerStarted","Data":"f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b"} Nov 24 17:22:00 crc kubenswrapper[4760]: I1124 17:22:00.754311 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69c66cac-178f-40ee-99c8-fe71ced5126c","Type":"ContainerStarted","Data":"909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1"} Nov 24 17:22:01 crc kubenswrapper[4760]: I1124 17:22:01.768363 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69c66cac-178f-40ee-99c8-fe71ced5126c","Type":"ContainerStarted","Data":"4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df"} Nov 24 17:22:01 crc kubenswrapper[4760]: I1124 17:22:01.768726 4760 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 17:22:01 crc kubenswrapper[4760]: I1124 17:22:01.808493 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.576425027 podStartE2EDuration="5.808462936s" podCreationTimestamp="2025-11-24 17:21:56 +0000 UTC" firstStartedPulling="2025-11-24 17:21:57.542098479 +0000 UTC m=+1112.864980029" lastFinishedPulling="2025-11-24 17:22:00.774136378 +0000 UTC m=+1116.097017938" observedRunningTime="2025-11-24 17:22:01.804077771 +0000 UTC m=+1117.126959371" watchObservedRunningTime="2025-11-24 17:22:01.808462936 +0000 UTC m=+1117.131344536" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.171245 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.600144 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-982zc"] Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.601553 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.604993 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.605665 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.640639 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-982zc"] Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.712227 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wffcr\" (UniqueName: \"kubernetes.io/projected/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-kube-api-access-wffcr\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.719933 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-scripts\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.719977 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-config-data\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.720028 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.816534 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:22:02 crc 
kubenswrapper[4760]: I1124 17:22:02.817628 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.820185 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.821247 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wffcr\" (UniqueName: \"kubernetes.io/projected/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-kube-api-access-wffcr\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.821298 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-scripts\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.821321 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-config-data\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.821340 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.830913 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-config-data\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.831710 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.842084 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.861183 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-scripts\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.870845 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wffcr\" (UniqueName: \"kubernetes.io/projected/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-kube-api-access-wffcr\") pod \"nova-cell0-cell-mapping-982zc\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") " 
pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.927545 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.927620 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8xrc\" (UniqueName: \"kubernetes.io/projected/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-kube-api-access-q8xrc\") pod \"nova-scheduler-0\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.927838 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-config-data\") pod \"nova-scheduler-0\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.950436 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.952788 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.954337 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-982zc" Nov 24 17:22:02 crc kubenswrapper[4760]: I1124 17:22:02.960411 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.002399 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.050576 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.050696 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-config-data\") pod \"nova-scheduler-0\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.050754 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/226a3269-3db4-471f-947b-44f0d9e1f5af-logs\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.050779 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-config-data\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.050850 4760 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.050884 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8xrc\" (UniqueName: \"kubernetes.io/projected/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-kube-api-access-q8xrc\") pod \"nova-scheduler-0\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.050920 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2hm9\" (UniqueName: \"kubernetes.io/projected/226a3269-3db4-471f-947b-44f0d9e1f5af-kube-api-access-w2hm9\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.066664 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.068600 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-config-data\") pod \"nova-scheduler-0\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.066829 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.076384 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.089526 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.145717 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8xrc\" (UniqueName: \"kubernetes.io/projected/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-kube-api-access-q8xrc\") pod \"nova-scheduler-0\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.156997 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/226a3269-3db4-471f-947b-44f0d9e1f5af-logs\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.157057 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-config-data\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.157113 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2hm9\" (UniqueName: 
\"kubernetes.io/projected/226a3269-3db4-471f-947b-44f0d9e1f5af-kube-api-access-w2hm9\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.157185 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.159719 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/226a3269-3db4-471f-947b-44f0d9e1f5af-logs\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.176579 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.181683 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-config-data\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.191569 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.215069 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.216242 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.217762 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2hm9\" (UniqueName: \"kubernetes.io/projected/226a3269-3db4-471f-947b-44f0d9e1f5af-kube-api-access-w2hm9\") pod \"nova-api-0\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.220411 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.224511 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.235439 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.283717 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zvlr\" (UniqueName: \"kubernetes.io/projected/9b4ca074-b020-4361-8308-f09a09c1bcff-kube-api-access-9zvlr\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.283801 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.283833 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.283883 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.283910 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztc5q\" (UniqueName: \"kubernetes.io/projected/34102e46-18bf-4d17-8718-26ce42e706ae-kube-api-access-ztc5q\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.283942 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34102e46-18bf-4d17-8718-26ce42e706ae-logs\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.283981 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-config-data\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.286107 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.290349 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-d5dth"] Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.292152 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.314074 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-d5dth"] Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385197 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zvlr\" (UniqueName: \"kubernetes.io/projected/9b4ca074-b020-4361-8308-f09a09c1bcff-kube-api-access-9zvlr\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385263 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-config\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385293 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385316 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385353 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swv6n\" (UniqueName: \"kubernetes.io/projected/8b292f44-3605-46d8-950d-39454e65e258-kube-api-access-swv6n\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385401 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385436 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385482 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385512 4760 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385542 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztc5q\" (UniqueName: \"kubernetes.io/projected/34102e46-18bf-4d17-8718-26ce42e706ae-kube-api-access-ztc5q\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385567 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-svc\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385598 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34102e46-18bf-4d17-8718-26ce42e706ae-logs\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.385636 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-config-data\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.390154 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34102e46-18bf-4d17-8718-26ce42e706ae-logs\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.394598 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-config-data\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.396015 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.402639 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.419193 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.426198 
4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zvlr\" (UniqueName: \"kubernetes.io/projected/9b4ca074-b020-4361-8308-f09a09c1bcff-kube-api-access-9zvlr\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.450899 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztc5q\" (UniqueName: \"kubernetes.io/projected/34102e46-18bf-4d17-8718-26ce42e706ae-kube-api-access-ztc5q\") pod \"nova-metadata-0\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.493773 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-config\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.493803 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.493823 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.493850 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swv6n\" (UniqueName: \"kubernetes.io/projected/8b292f44-3605-46d8-950d-39454e65e258-kube-api-access-swv6n\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.493907 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.493937 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-svc\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.497698 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-svc\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.496404 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" 
(UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.499217 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.501688 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-config\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.503656 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.518100 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swv6n\" (UniqueName: \"kubernetes.io/projected/8b292f44-3605-46d8-950d-39454e65e258-kube-api-access-swv6n\") pod \"dnsmasq-dns-757b4f8459-d5dth\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.579346 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.642778 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.696525 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.804927 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-982zc"] Nov 24 17:22:03 crc kubenswrapper[4760]: I1124 17:22:03.987482 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:03 crc kubenswrapper[4760]: W1124 17:22:03.996059 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod226a3269_3db4_471f_947b_44f0d9e1f5af.slice/crio-2f4f68e47f10a1d39f768cc0f94d1f7ef3762dde7e216cc9d151e2aec49750f3 WatchSource:0}: Error finding container 2f4f68e47f10a1d39f768cc0f94d1f7ef3762dde7e216cc9d151e2aec49750f3: Status 404 returned error can't find the container with id 2f4f68e47f10a1d39f768cc0f94d1f7ef3762dde7e216cc9d151e2aec49750f3 Nov 24 17:22:04 crc kubenswrapper[4760]: E1124 17:22:04.129460 4760 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb140b79a_3f9b_4909_bf34_2be905ddf6b0.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3003350b_62f1_4eb7_b044_bc0e8b007ef5.slice\": RecentStats: unable to find data in memory cache]" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.260898 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.344201 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4xzmb"] Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.345832 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.350866 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.351519 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.361286 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4xzmb"] Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.528838 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-scripts\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.528913 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-config-data\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.528953 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.529016 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8r2r\" (UniqueName: \"kubernetes.io/projected/5870c82c-79ce-46d0-861b-bfebbab194f9-kube-api-access-w8r2r\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.630409 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-scripts\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.630463 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-config-data\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.630501 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.630543 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-w8r2r\" (UniqueName: \"kubernetes.io/projected/5870c82c-79ce-46d0-861b-bfebbab194f9-kube-api-access-w8r2r\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.635665 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.639589 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-config-data\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.642050 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-scripts\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.651578 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8r2r\" (UniqueName: \"kubernetes.io/projected/5870c82c-79ce-46d0-861b-bfebbab194f9-kube-api-access-w8r2r\") pod \"nova-cell1-conductor-db-sync-4xzmb\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") " pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.674439 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4xzmb" Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.824062 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.849665 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.874862 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-d5dth"] Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.927781 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-982zc" event={"ID":"4ceea6f9-40cc-4203-8d24-79bb3b19eebe","Type":"ContainerStarted","Data":"8051cdac390a97e6818990f57d131f6ed1953606210d5921be2a42b25a2a34c2"} Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.927834 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-982zc" event={"ID":"4ceea6f9-40cc-4203-8d24-79bb3b19eebe","Type":"ContainerStarted","Data":"1e7ef5d1e5b594b35b5c9e5675325d84ea697b4cf2cd3324a28cb4cbd60d6f2c"} Nov 24 17:22:04 crc kubenswrapper[4760]: W1124 17:22:04.935523 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34102e46_18bf_4d17_8718_26ce42e706ae.slice/crio-1873d3213309c9f87ade4ab4b4fb8a8c512cc90a661739ef70caf8b135255c9d WatchSource:0}: Error finding container 1873d3213309c9f87ade4ab4b4fb8a8c512cc90a661739ef70caf8b135255c9d: Status 404 returned error can't find the container with id 1873d3213309c9f87ade4ab4b4fb8a8c512cc90a661739ef70caf8b135255c9d Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.942859 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"586a40e1-bd4d-46ed-ba0a-8c0f83a52996","Type":"ContainerStarted","Data":"a8cb511c4227c1af906a690df832340f45cfcde3570f902096f6f9cb19cdc061"} Nov 24 17:22:04 crc kubenswrapper[4760]: I1124 17:22:04.960640 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"226a3269-3db4-471f-947b-44f0d9e1f5af","Type":"ContainerStarted","Data":"2f4f68e47f10a1d39f768cc0f94d1f7ef3762dde7e216cc9d151e2aec49750f3"} Nov 24 17:22:05 crc kubenswrapper[4760]: I1124 17:22:05.013289 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-982zc" podStartSLOduration=3.013263596 podStartE2EDuration="3.013263596s" podCreationTimestamp="2025-11-24 17:22:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:04.982754762 +0000 UTC m=+1120.305636312" watchObservedRunningTime="2025-11-24 17:22:05.013263596 +0000 UTC m=+1120.336145146" Nov 24 17:22:05 crc kubenswrapper[4760]: I1124 17:22:05.226236 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4xzmb"] Nov 24 17:22:05 crc kubenswrapper[4760]: I1124 17:22:05.971647 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"34102e46-18bf-4d17-8718-26ce42e706ae","Type":"ContainerStarted","Data":"1873d3213309c9f87ade4ab4b4fb8a8c512cc90a661739ef70caf8b135255c9d"} Nov 24 17:22:05 crc kubenswrapper[4760]: I1124 17:22:05.977572 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"9b4ca074-b020-4361-8308-f09a09c1bcff","Type":"ContainerStarted","Data":"b425d1d4f6a8798fde81cfa30a7306b0739883bb97b0b4114ac194a78cc49dff"} Nov 24 17:22:05 crc kubenswrapper[4760]: I1124 17:22:05.980404 4760 generic.go:334] "Generic (PLEG): container finished" podID="8b292f44-3605-46d8-950d-39454e65e258" containerID="1bccadad484fe72ceed7d6cb49d49cd8b3761d9b7328f836a8bbe9e0bb89d306" exitCode=0 Nov 24 17:22:05 crc kubenswrapper[4760]: I1124 17:22:05.980599 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" event={"ID":"8b292f44-3605-46d8-950d-39454e65e258","Type":"ContainerDied","Data":"1bccadad484fe72ceed7d6cb49d49cd8b3761d9b7328f836a8bbe9e0bb89d306"} Nov 24 17:22:05 crc kubenswrapper[4760]: I1124 17:22:05.980650 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" event={"ID":"8b292f44-3605-46d8-950d-39454e65e258","Type":"ContainerStarted","Data":"2e2e8595778809b3eab741379b83bcc79451b77e457cb607dd8ba42ebfea7096"} Nov 24 17:22:05 crc kubenswrapper[4760]: I1124 17:22:05.984629 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4xzmb" event={"ID":"5870c82c-79ce-46d0-861b-bfebbab194f9","Type":"ContainerStarted","Data":"8c52a38789ce8fe16540733510b8a381c264f8d6854afb30dc68241931266d06"} Nov 24 17:22:05 crc kubenswrapper[4760]: I1124 17:22:05.984654 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4xzmb" event={"ID":"5870c82c-79ce-46d0-861b-bfebbab194f9","Type":"ContainerStarted","Data":"786ae7185beb88c9e33f81ae7b2837e78e29f7425d8378aa1bec6a2f6c245d6c"} Nov 24 17:22:06 crc kubenswrapper[4760]: I1124 17:22:06.076491 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-4xzmb" podStartSLOduration=2.07647595 podStartE2EDuration="2.07647595s" podCreationTimestamp="2025-11-24 17:22:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:06.070058776 +0000 UTC m=+1121.392940326" watchObservedRunningTime="2025-11-24 17:22:06.07647595 +0000 UTC m=+1121.399357500" Nov 24 17:22:07 crc kubenswrapper[4760]: I1124 17:22:07.925159 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 17:22:07 crc kubenswrapper[4760]: I1124 17:22:07.936856 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.020601 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"226a3269-3db4-471f-947b-44f0d9e1f5af","Type":"ContainerStarted","Data":"a5d94d2f056d8ab54ea8bfa3813d0c5e2cefe5d64573819f43b77eac24a33b5e"} Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.021121 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"226a3269-3db4-471f-947b-44f0d9e1f5af","Type":"ContainerStarted","Data":"422fddc4c0bc32cc8972758d507d51d1a5b22534d1848b583ffc4e7ef3089357"} Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.022678 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"34102e46-18bf-4d17-8718-26ce42e706ae","Type":"ContainerStarted","Data":"b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad"} Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.022712 4760 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"34102e46-18bf-4d17-8718-26ce42e706ae","Type":"ContainerStarted","Data":"a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027"} Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.022791 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="34102e46-18bf-4d17-8718-26ce42e706ae" containerName="nova-metadata-log" containerID="cri-o://a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027" gracePeriod=30 Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.023148 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="34102e46-18bf-4d17-8718-26ce42e706ae" containerName="nova-metadata-metadata" containerID="cri-o://b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad" gracePeriod=30 Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.036529 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9b4ca074-b020-4361-8308-f09a09c1bcff","Type":"ContainerStarted","Data":"777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4"} Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.036581 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="9b4ca074-b020-4361-8308-f09a09c1bcff" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4" gracePeriod=30 Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.044618 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" event={"ID":"8b292f44-3605-46d8-950d-39454e65e258","Type":"ContainerStarted","Data":"88993599c4082243864390714cd9c961b2aa31184a1c1fbb31bedb247f4a2179"} Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.044771 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.047308 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"586a40e1-bd4d-46ed-ba0a-8c0f83a52996","Type":"ContainerStarted","Data":"84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91"} Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.052327 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.2896223940000002 podStartE2EDuration="7.052305579s" podCreationTimestamp="2025-11-24 17:22:02 +0000 UTC" firstStartedPulling="2025-11-24 17:22:04.006018574 +0000 UTC m=+1119.328900124" lastFinishedPulling="2025-11-24 17:22:07.768701759 +0000 UTC m=+1123.091583309" observedRunningTime="2025-11-24 17:22:09.037163985 +0000 UTC m=+1124.360045565" watchObservedRunningTime="2025-11-24 17:22:09.052305579 +0000 UTC m=+1124.375187169" Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.104272 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=4.30290978 podStartE2EDuration="7.104253858s" podCreationTimestamp="2025-11-24 17:22:02 +0000 UTC" firstStartedPulling="2025-11-24 17:22:04.964750826 +0000 UTC m=+1120.287632376" lastFinishedPulling="2025-11-24 17:22:07.766094884 +0000 UTC m=+1123.088976454" observedRunningTime="2025-11-24 17:22:09.073490696 +0000 UTC m=+1124.396372256" 
watchObservedRunningTime="2025-11-24 17:22:09.104253858 +0000 UTC m=+1124.427135408" Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.163090 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.328267316 podStartE2EDuration="7.163069763s" podCreationTimestamp="2025-11-24 17:22:02 +0000 UTC" firstStartedPulling="2025-11-24 17:22:04.942793627 +0000 UTC m=+1120.265675177" lastFinishedPulling="2025-11-24 17:22:07.777596074 +0000 UTC m=+1123.100477624" observedRunningTime="2025-11-24 17:22:09.118335951 +0000 UTC m=+1124.441217501" watchObservedRunningTime="2025-11-24 17:22:09.163069763 +0000 UTC m=+1124.485951323" Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.183149 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.7045608039999998 podStartE2EDuration="7.183130778s" podCreationTimestamp="2025-11-24 17:22:02 +0000 UTC" firstStartedPulling="2025-11-24 17:22:04.287583742 +0000 UTC m=+1119.610465292" lastFinishedPulling="2025-11-24 17:22:07.766153716 +0000 UTC m=+1123.089035266" observedRunningTime="2025-11-24 17:22:09.151290075 +0000 UTC m=+1124.474171625" watchObservedRunningTime="2025-11-24 17:22:09.183130778 +0000 UTC m=+1124.506012328" Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.193480 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" podStartSLOduration=6.193461304 podStartE2EDuration="6.193461304s" podCreationTimestamp="2025-11-24 17:22:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:09.18182105 +0000 UTC m=+1124.504702600" watchObservedRunningTime="2025-11-24 17:22:09.193461304 +0000 UTC m=+1124.516342854" Nov 24 17:22:09 crc kubenswrapper[4760]: I1124 17:22:09.939172 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.058327 4760 generic.go:334] "Generic (PLEG): container finished" podID="34102e46-18bf-4d17-8718-26ce42e706ae" containerID="b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad" exitCode=0 Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.058357 4760 generic.go:334] "Generic (PLEG): container finished" podID="34102e46-18bf-4d17-8718-26ce42e706ae" containerID="a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027" exitCode=143 Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.059212 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"34102e46-18bf-4d17-8718-26ce42e706ae","Type":"ContainerDied","Data":"b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad"} Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.059262 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"34102e46-18bf-4d17-8718-26ce42e706ae","Type":"ContainerDied","Data":"a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027"} Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.059275 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"34102e46-18bf-4d17-8718-26ce42e706ae","Type":"ContainerDied","Data":"1873d3213309c9f87ade4ab4b4fb8a8c512cc90a661739ef70caf8b135255c9d"} Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.059293 4760 scope.go:117] "RemoveContainer" containerID="b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.059321 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.069818 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztc5q\" (UniqueName: \"kubernetes.io/projected/34102e46-18bf-4d17-8718-26ce42e706ae-kube-api-access-ztc5q\") pod \"34102e46-18bf-4d17-8718-26ce42e706ae\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.070754 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34102e46-18bf-4d17-8718-26ce42e706ae-logs\") pod \"34102e46-18bf-4d17-8718-26ce42e706ae\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.070927 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-combined-ca-bundle\") pod \"34102e46-18bf-4d17-8718-26ce42e706ae\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.070993 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-config-data\") pod \"34102e46-18bf-4d17-8718-26ce42e706ae\" (UID: \"34102e46-18bf-4d17-8718-26ce42e706ae\") " Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.072940 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34102e46-18bf-4d17-8718-26ce42e706ae-logs" (OuterVolumeSpecName: "logs") pod "34102e46-18bf-4d17-8718-26ce42e706ae" (UID: 
"34102e46-18bf-4d17-8718-26ce42e706ae"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.073969 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34102e46-18bf-4d17-8718-26ce42e706ae-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.092057 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34102e46-18bf-4d17-8718-26ce42e706ae-kube-api-access-ztc5q" (OuterVolumeSpecName: "kube-api-access-ztc5q") pod "34102e46-18bf-4d17-8718-26ce42e706ae" (UID: "34102e46-18bf-4d17-8718-26ce42e706ae"). InnerVolumeSpecName "kube-api-access-ztc5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.100567 4760 scope.go:117] "RemoveContainer" containerID="a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.121261 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34102e46-18bf-4d17-8718-26ce42e706ae" (UID: "34102e46-18bf-4d17-8718-26ce42e706ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.123465 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-config-data" (OuterVolumeSpecName: "config-data") pod "34102e46-18bf-4d17-8718-26ce42e706ae" (UID: "34102e46-18bf-4d17-8718-26ce42e706ae"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.178281 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.178316 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34102e46-18bf-4d17-8718-26ce42e706ae-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.178326 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztc5q\" (UniqueName: \"kubernetes.io/projected/34102e46-18bf-4d17-8718-26ce42e706ae-kube-api-access-ztc5q\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.242509 4760 scope.go:117] "RemoveContainer" containerID="b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad" Nov 24 17:22:10 crc kubenswrapper[4760]: E1124 17:22:10.243379 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad\": container with ID starting with b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad not found: ID does not exist" containerID="b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.243425 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad"} err="failed to get container status \"b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad\": rpc error: code = NotFound desc = could not find container \"b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad\": container with ID starting with b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad not found: ID does not exist" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.243457 4760 scope.go:117] "RemoveContainer" containerID="a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027" Nov 24 17:22:10 crc kubenswrapper[4760]: E1124 17:22:10.243933 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027\": container with ID starting with a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027 not found: ID does not exist" containerID="a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.243989 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027"} err="failed to get container status \"a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027\": rpc error: code = NotFound desc = could not find container \"a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027\": container with ID starting with a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027 not found: ID does not exist" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.244058 4760 scope.go:117] "RemoveContainer" containerID="b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad" Nov 24 17:22:10 crc 
kubenswrapper[4760]: I1124 17:22:10.244398 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad"} err="failed to get container status \"b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad\": rpc error: code = NotFound desc = could not find container \"b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad\": container with ID starting with b1ac09f1a7f82db38ab6cda7eb4253120b475cc0e03d3ff5e8def538cad50fad not found: ID does not exist" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.244424 4760 scope.go:117] "RemoveContainer" containerID="a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.244724 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027"} err="failed to get container status \"a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027\": rpc error: code = NotFound desc = could not find container \"a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027\": container with ID starting with a51724d335258eee1de97153b1e6c98777b8859332bdab537e1fb2f00c1c3027 not found: ID does not exist" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.397137 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.422241 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.432353 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 24 17:22:10 crc kubenswrapper[4760]: E1124 17:22:10.433035 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34102e46-18bf-4d17-8718-26ce42e706ae" containerName="nova-metadata-metadata" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.433051 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="34102e46-18bf-4d17-8718-26ce42e706ae" containerName="nova-metadata-metadata" Nov 24 17:22:10 crc kubenswrapper[4760]: E1124 17:22:10.433092 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34102e46-18bf-4d17-8718-26ce42e706ae" containerName="nova-metadata-log" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.433099 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="34102e46-18bf-4d17-8718-26ce42e706ae" containerName="nova-metadata-log" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.433281 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="34102e46-18bf-4d17-8718-26ce42e706ae" containerName="nova-metadata-metadata" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.433299 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="34102e46-18bf-4d17-8718-26ce42e706ae" containerName="nova-metadata-log" Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.434174 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.438862 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.439290 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.443123 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.493391 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.493450 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp2dx\" (UniqueName: \"kubernetes.io/projected/50bca488-c675-4109-84d3-e70fb254c23c-kube-api-access-tp2dx\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.493567 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50bca488-c675-4109-84d3-e70fb254c23c-logs\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.493637 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-config-data\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.493686 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.595340 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50bca488-c675-4109-84d3-e70fb254c23c-logs\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.595433 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-config-data\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.595477 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.595551 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp2dx\" (UniqueName: \"kubernetes.io/projected/50bca488-c675-4109-84d3-e70fb254c23c-kube-api-access-tp2dx\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.595576 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.596133 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50bca488-c675-4109-84d3-e70fb254c23c-logs\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.600065 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.600691 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.604638 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-config-data\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.612133 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp2dx\" (UniqueName: \"kubernetes.io/projected/50bca488-c675-4109-84d3-e70fb254c23c-kube-api-access-tp2dx\") pod \"nova-metadata-0\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:10 crc kubenswrapper[4760]: I1124 17:22:10.762699 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 24 17:22:11 crc kubenswrapper[4760]: I1124 17:22:11.233462 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:22:11 crc kubenswrapper[4760]: W1124 17:22:11.235636 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50bca488_c675_4109_84d3_e70fb254c23c.slice/crio-dadc60f8492358afc552a4bd010d1a69fac0abb56a89837254e34ed28d626151 WatchSource:0}: Error finding container dadc60f8492358afc552a4bd010d1a69fac0abb56a89837254e34ed28d626151: Status 404 returned error can't find the container with id dadc60f8492358afc552a4bd010d1a69fac0abb56a89837254e34ed28d626151
Nov 24 17:22:11 crc kubenswrapper[4760]: I1124 17:22:11.478539 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34102e46-18bf-4d17-8718-26ce42e706ae" path="/var/lib/kubelet/pods/34102e46-18bf-4d17-8718-26ce42e706ae/volumes"
Nov 24 17:22:12 crc kubenswrapper[4760]: I1124 17:22:12.083875 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"50bca488-c675-4109-84d3-e70fb254c23c","Type":"ContainerStarted","Data":"c4168340d9217021e68a6868dfc0363a2bc35187b2704860d31f25ab6293d563"}
Nov 24 17:22:12 crc kubenswrapper[4760]: I1124 17:22:12.083918 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"50bca488-c675-4109-84d3-e70fb254c23c","Type":"ContainerStarted","Data":"e0846ff9301355e0564be5cb862a556e20c22383598b1d7463473062e200cf42"}
Nov 24 17:22:12 crc kubenswrapper[4760]: I1124 17:22:12.083930 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"50bca488-c675-4109-84d3-e70fb254c23c","Type":"ContainerStarted","Data":"dadc60f8492358afc552a4bd010d1a69fac0abb56a89837254e34ed28d626151"}
Nov 24 17:22:12 crc kubenswrapper[4760]: I1124 17:22:12.105414 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.105393541 podStartE2EDuration="2.105393541s" podCreationTimestamp="2025-11-24 17:22:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:12.099513992 +0000 UTC m=+1127.422395542" watchObservedRunningTime="2025-11-24 17:22:12.105393541 +0000 UTC m=+1127.428275091"
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.093884 4760 generic.go:334] "Generic (PLEG): container finished" podID="5870c82c-79ce-46d0-861b-bfebbab194f9" containerID="8c52a38789ce8fe16540733510b8a381c264f8d6854afb30dc68241931266d06" exitCode=0
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.093974 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4xzmb" event={"ID":"5870c82c-79ce-46d0-861b-bfebbab194f9","Type":"ContainerDied","Data":"8c52a38789ce8fe16540733510b8a381c264f8d6854afb30dc68241931266d06"}
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.096380 4760 generic.go:334] "Generic (PLEG): container finished" podID="4ceea6f9-40cc-4203-8d24-79bb3b19eebe" containerID="8051cdac390a97e6818990f57d131f6ed1953606210d5921be2a42b25a2a34c2" exitCode=0
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.096734 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-982zc" event={"ID":"4ceea6f9-40cc-4203-8d24-79bb3b19eebe","Type":"ContainerDied","Data":"8051cdac390a97e6818990f57d131f6ed1953606210d5921be2a42b25a2a34c2"}
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.225745 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.225810 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.255616 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.286978 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.287075 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.643247 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.699347 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-757b4f8459-d5dth"
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.770214 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-ttg5w"]
Nov 24 17:22:13 crc kubenswrapper[4760]: I1124 17:22:13.770456 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" podUID="64125b05-f62e-43ea-a1b4-25785686d5e8" containerName="dnsmasq-dns" containerID="cri-o://0d675610c6c72f67c7f4f0eae825ce80cc26a753b54dff49504ff29734e755fe" gracePeriod=10
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.106962 4760 generic.go:334] "Generic (PLEG): container finished" podID="64125b05-f62e-43ea-a1b4-25785686d5e8" containerID="0d675610c6c72f67c7f4f0eae825ce80cc26a753b54dff49504ff29734e755fe" exitCode=0
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.107047 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" event={"ID":"64125b05-f62e-43ea-a1b4-25785686d5e8","Type":"ContainerDied","Data":"0d675610c6c72f67c7f4f0eae825ce80cc26a753b54dff49504ff29734e755fe"}
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.185283 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.369934 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.187:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.370296 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.187:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.422128 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.487928 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9r25\" (UniqueName: \"kubernetes.io/projected/64125b05-f62e-43ea-a1b4-25785686d5e8-kube-api-access-c9r25\") pod \"64125b05-f62e-43ea-a1b4-25785686d5e8\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.488060 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-svc\") pod \"64125b05-f62e-43ea-a1b4-25785686d5e8\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.488107 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-swift-storage-0\") pod \"64125b05-f62e-43ea-a1b4-25785686d5e8\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.488150 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-nb\") pod \"64125b05-f62e-43ea-a1b4-25785686d5e8\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.488177 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-sb\") pod \"64125b05-f62e-43ea-a1b4-25785686d5e8\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.488299 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-config\") pod \"64125b05-f62e-43ea-a1b4-25785686d5e8\" (UID: \"64125b05-f62e-43ea-a1b4-25785686d5e8\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: E1124 17:22:14.517629 4760 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3003350b_62f1_4eb7_b044_bc0e8b007ef5.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb140b79a_3f9b_4909_bf34_2be905ddf6b0.slice\": RecentStats: unable to find data in memory cache]"
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.522897 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64125b05-f62e-43ea-a1b4-25785686d5e8-kube-api-access-c9r25" (OuterVolumeSpecName: "kube-api-access-c9r25") pod "64125b05-f62e-43ea-a1b4-25785686d5e8" (UID: "64125b05-f62e-43ea-a1b4-25785686d5e8"). InnerVolumeSpecName "kube-api-access-c9r25". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.593497 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-config" (OuterVolumeSpecName: "config") pod "64125b05-f62e-43ea-a1b4-25785686d5e8" (UID: "64125b05-f62e-43ea-a1b4-25785686d5e8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.594910 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-config\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.594937 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9r25\" (UniqueName: \"kubernetes.io/projected/64125b05-f62e-43ea-a1b4-25785686d5e8-kube-api-access-c9r25\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.645461 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "64125b05-f62e-43ea-a1b4-25785686d5e8" (UID: "64125b05-f62e-43ea-a1b4-25785686d5e8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.646122 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "64125b05-f62e-43ea-a1b4-25785686d5e8" (UID: "64125b05-f62e-43ea-a1b4-25785686d5e8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.651575 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "64125b05-f62e-43ea-a1b4-25785686d5e8" (UID: "64125b05-f62e-43ea-a1b4-25785686d5e8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.661152 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-982zc"
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.664660 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4xzmb"
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.705363 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-combined-ca-bundle\") pod \"5870c82c-79ce-46d0-861b-bfebbab194f9\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.705485 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8r2r\" (UniqueName: \"kubernetes.io/projected/5870c82c-79ce-46d0-861b-bfebbab194f9-kube-api-access-w8r2r\") pod \"5870c82c-79ce-46d0-861b-bfebbab194f9\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.705645 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-combined-ca-bundle\") pod \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.705696 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-scripts\") pod \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.705717 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-scripts\") pod \"5870c82c-79ce-46d0-861b-bfebbab194f9\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.705812 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-config-data\") pod \"5870c82c-79ce-46d0-861b-bfebbab194f9\" (UID: \"5870c82c-79ce-46d0-861b-bfebbab194f9\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.705883 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-config-data\") pod \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.705924 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wffcr\" (UniqueName: \"kubernetes.io/projected/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-kube-api-access-wffcr\") pod \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\" (UID: \"4ceea6f9-40cc-4203-8d24-79bb3b19eebe\") "
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.706602 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.706626 4760 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.706641 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.717586 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-scripts" (OuterVolumeSpecName: "scripts") pod "4ceea6f9-40cc-4203-8d24-79bb3b19eebe" (UID: "4ceea6f9-40cc-4203-8d24-79bb3b19eebe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.718087 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-scripts" (OuterVolumeSpecName: "scripts") pod "5870c82c-79ce-46d0-861b-bfebbab194f9" (UID: "5870c82c-79ce-46d0-861b-bfebbab194f9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.718708 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "64125b05-f62e-43ea-a1b4-25785686d5e8" (UID: "64125b05-f62e-43ea-a1b4-25785686d5e8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.718924 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-kube-api-access-wffcr" (OuterVolumeSpecName: "kube-api-access-wffcr") pod "4ceea6f9-40cc-4203-8d24-79bb3b19eebe" (UID: "4ceea6f9-40cc-4203-8d24-79bb3b19eebe"). InnerVolumeSpecName "kube-api-access-wffcr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.723682 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5870c82c-79ce-46d0-861b-bfebbab194f9-kube-api-access-w8r2r" (OuterVolumeSpecName: "kube-api-access-w8r2r") pod "5870c82c-79ce-46d0-861b-bfebbab194f9" (UID: "5870c82c-79ce-46d0-861b-bfebbab194f9"). InnerVolumeSpecName "kube-api-access-w8r2r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.738867 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ceea6f9-40cc-4203-8d24-79bb3b19eebe" (UID: "4ceea6f9-40cc-4203-8d24-79bb3b19eebe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.746577 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-config-data" (OuterVolumeSpecName: "config-data") pod "5870c82c-79ce-46d0-861b-bfebbab194f9" (UID: "5870c82c-79ce-46d0-861b-bfebbab194f9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.746635 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5870c82c-79ce-46d0-861b-bfebbab194f9" (UID: "5870c82c-79ce-46d0-861b-bfebbab194f9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.753155 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-config-data" (OuterVolumeSpecName: "config-data") pod "4ceea6f9-40cc-4203-8d24-79bb3b19eebe" (UID: "4ceea6f9-40cc-4203-8d24-79bb3b19eebe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.807896 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.807928 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.807937 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.807945 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.807953 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/64125b05-f62e-43ea-a1b4-25785686d5e8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.807961 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.807969 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wffcr\" (UniqueName: \"kubernetes.io/projected/4ceea6f9-40cc-4203-8d24-79bb3b19eebe-kube-api-access-wffcr\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.807979 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5870c82c-79ce-46d0-861b-bfebbab194f9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:14 crc kubenswrapper[4760]: I1124 17:22:14.807987 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8r2r\" (UniqueName: \"kubernetes.io/projected/5870c82c-79ce-46d0-861b-bfebbab194f9-kube-api-access-w8r2r\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.121873 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w" event={"ID":"64125b05-f62e-43ea-a1b4-25785686d5e8","Type":"ContainerDied","Data":"7990c9e951f9402d377359232584e487b2e71a6b992ef1c64d1d3156b5989c43"}
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.121957 4760 scope.go:117] "RemoveContainer" containerID="0d675610c6c72f67c7f4f0eae825ce80cc26a753b54dff49504ff29734e755fe"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.122144 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-ttg5w"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.129288 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-982zc" event={"ID":"4ceea6f9-40cc-4203-8d24-79bb3b19eebe","Type":"ContainerDied","Data":"1e7ef5d1e5b594b35b5c9e5675325d84ea697b4cf2cd3324a28cb4cbd60d6f2c"}
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.129331 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e7ef5d1e5b594b35b5c9e5675325d84ea697b4cf2cd3324a28cb4cbd60d6f2c"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.129455 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-982zc"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.134867 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4xzmb" event={"ID":"5870c82c-79ce-46d0-861b-bfebbab194f9","Type":"ContainerDied","Data":"786ae7185beb88c9e33f81ae7b2837e78e29f7425d8378aa1bec6a2f6c245d6c"}
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.134954 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="786ae7185beb88c9e33f81ae7b2837e78e29f7425d8378aa1bec6a2f6c245d6c"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.134886 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4xzmb"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.189115 4760 scope.go:117] "RemoveContainer" containerID="ff8ac5ce76b923515cdfa993e8f5d1872d431ca2b4a92053388f6dcb155e1aea"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.191884 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-ttg5w"]
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.201443 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-ttg5w"]
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.253301 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 24 17:22:15 crc kubenswrapper[4760]: E1124 17:22:15.253989 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5870c82c-79ce-46d0-861b-bfebbab194f9" containerName="nova-cell1-conductor-db-sync"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.254035 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="5870c82c-79ce-46d0-861b-bfebbab194f9" containerName="nova-cell1-conductor-db-sync"
Nov 24 17:22:15 crc kubenswrapper[4760]: E1124 17:22:15.254064 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64125b05-f62e-43ea-a1b4-25785686d5e8" containerName="dnsmasq-dns"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.254072 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="64125b05-f62e-43ea-a1b4-25785686d5e8" containerName="dnsmasq-dns"
Nov 24 17:22:15 crc kubenswrapper[4760]: E1124 17:22:15.254092 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ceea6f9-40cc-4203-8d24-79bb3b19eebe" containerName="nova-manage"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.254100 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ceea6f9-40cc-4203-8d24-79bb3b19eebe" containerName="nova-manage"
Nov 24 17:22:15 crc kubenswrapper[4760]: E1124 17:22:15.254116 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64125b05-f62e-43ea-a1b4-25785686d5e8" containerName="init"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.254123 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="64125b05-f62e-43ea-a1b4-25785686d5e8" containerName="init"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.254337 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ceea6f9-40cc-4203-8d24-79bb3b19eebe" containerName="nova-manage"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.254359 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="64125b05-f62e-43ea-a1b4-25785686d5e8" containerName="dnsmasq-dns"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.254373 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="5870c82c-79ce-46d0-861b-bfebbab194f9" containerName="nova-cell1-conductor-db-sync"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.255374 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.259483 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.261860 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.319309 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61870fa4-1b0e-450c-a2a8-06d3ba20cd3e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.319387 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9m6zq\" (UniqueName: \"kubernetes.io/projected/61870fa4-1b0e-450c-a2a8-06d3ba20cd3e-kube-api-access-9m6zq\") pod \"nova-cell1-conductor-0\" (UID: \"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.319439 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61870fa4-1b0e-450c-a2a8-06d3ba20cd3e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.419111 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.422049 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61870fa4-1b0e-450c-a2a8-06d3ba20cd3e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.422211 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9m6zq\" (UniqueName: \"kubernetes.io/projected/61870fa4-1b0e-450c-a2a8-06d3ba20cd3e-kube-api-access-9m6zq\") pod \"nova-cell1-conductor-0\" (UID: \"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.422822 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61870fa4-1b0e-450c-a2a8-06d3ba20cd3e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.427538 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61870fa4-1b0e-450c-a2a8-06d3ba20cd3e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.428450 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61870fa4-1b0e-450c-a2a8-06d3ba20cd3e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.431358 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.431577 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerName="nova-api-log" containerID="cri-o://422fddc4c0bc32cc8972758d507d51d1a5b22534d1848b583ffc4e7ef3089357" gracePeriod=30
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.432965 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerName="nova-api-api" containerID="cri-o://a5d94d2f056d8ab54ea8bfa3813d0c5e2cefe5d64573819f43b77eac24a33b5e" gracePeriod=30
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.465599 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9m6zq\" (UniqueName: \"kubernetes.io/projected/61870fa4-1b0e-450c-a2a8-06d3ba20cd3e-kube-api-access-9m6zq\") pod \"nova-cell1-conductor-0\" (UID: \"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.502281 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64125b05-f62e-43ea-a1b4-25785686d5e8" path="/var/lib/kubelet/pods/64125b05-f62e-43ea-a1b4-25785686d5e8/volumes"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.502925 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.503101 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="50bca488-c675-4109-84d3-e70fb254c23c" containerName="nova-metadata-log" containerID="cri-o://e0846ff9301355e0564be5cb862a556e20c22383598b1d7463473062e200cf42" gracePeriod=30
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.503430 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="50bca488-c675-4109-84d3-e70fb254c23c" containerName="nova-metadata-metadata" containerID="cri-o://c4168340d9217021e68a6868dfc0363a2bc35187b2704860d31f25ab6293d563" gracePeriod=30
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.618914 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.763953 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 24 17:22:15 crc kubenswrapper[4760]: I1124 17:22:15.764442 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.116665 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.194319 4760 generic.go:334] "Generic (PLEG): container finished" podID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerID="422fddc4c0bc32cc8972758d507d51d1a5b22534d1848b583ffc4e7ef3089357" exitCode=143
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.194412 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"226a3269-3db4-471f-947b-44f0d9e1f5af","Type":"ContainerDied","Data":"422fddc4c0bc32cc8972758d507d51d1a5b22534d1848b583ffc4e7ef3089357"}
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.197352 4760 generic.go:334] "Generic (PLEG): container finished" podID="50bca488-c675-4109-84d3-e70fb254c23c" containerID="c4168340d9217021e68a6868dfc0363a2bc35187b2704860d31f25ab6293d563" exitCode=0
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.197374 4760 generic.go:334] "Generic (PLEG): container finished" podID="50bca488-c675-4109-84d3-e70fb254c23c" containerID="e0846ff9301355e0564be5cb862a556e20c22383598b1d7463473062e200cf42" exitCode=143
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.197427 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"50bca488-c675-4109-84d3-e70fb254c23c","Type":"ContainerDied","Data":"c4168340d9217021e68a6868dfc0363a2bc35187b2704860d31f25ab6293d563"}
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.197459 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"50bca488-c675-4109-84d3-e70fb254c23c","Type":"ContainerDied","Data":"e0846ff9301355e0564be5cb862a556e20c22383598b1d7463473062e200cf42"}
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.197473 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"50bca488-c675-4109-84d3-e70fb254c23c","Type":"ContainerDied","Data":"dadc60f8492358afc552a4bd010d1a69fac0abb56a89837254e34ed28d626151"}
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.197490 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dadc60f8492358afc552a4bd010d1a69fac0abb56a89837254e34ed28d626151"
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.207812 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="586a40e1-bd4d-46ed-ba0a-8c0f83a52996" containerName="nova-scheduler-scheduler" containerID="cri-o://84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91" gracePeriod=30
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.208518 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e","Type":"ContainerStarted","Data":"5e738f7d5dbee904c9a61dc1e558ba83baf12452aaf80d54c523f9623fe89c89"}
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.277176 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.352351 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tp2dx\" (UniqueName: \"kubernetes.io/projected/50bca488-c675-4109-84d3-e70fb254c23c-kube-api-access-tp2dx\") pod \"50bca488-c675-4109-84d3-e70fb254c23c\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") "
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.352471 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-config-data\") pod \"50bca488-c675-4109-84d3-e70fb254c23c\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") "
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.352593 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50bca488-c675-4109-84d3-e70fb254c23c-logs\") pod \"50bca488-c675-4109-84d3-e70fb254c23c\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") "
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.352700 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-nova-metadata-tls-certs\") pod \"50bca488-c675-4109-84d3-e70fb254c23c\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") "
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.352753 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-combined-ca-bundle\") pod \"50bca488-c675-4109-84d3-e70fb254c23c\" (UID: \"50bca488-c675-4109-84d3-e70fb254c23c\") "
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.353384 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50bca488-c675-4109-84d3-e70fb254c23c-logs" (OuterVolumeSpecName: "logs") pod "50bca488-c675-4109-84d3-e70fb254c23c" (UID: "50bca488-c675-4109-84d3-e70fb254c23c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.374213 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50bca488-c675-4109-84d3-e70fb254c23c-kube-api-access-tp2dx" (OuterVolumeSpecName: "kube-api-access-tp2dx") pod "50bca488-c675-4109-84d3-e70fb254c23c" (UID: "50bca488-c675-4109-84d3-e70fb254c23c"). InnerVolumeSpecName "kube-api-access-tp2dx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.386240 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-config-data" (OuterVolumeSpecName: "config-data") pod "50bca488-c675-4109-84d3-e70fb254c23c" (UID: "50bca488-c675-4109-84d3-e70fb254c23c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.399893 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "50bca488-c675-4109-84d3-e70fb254c23c" (UID: "50bca488-c675-4109-84d3-e70fb254c23c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.414689 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "50bca488-c675-4109-84d3-e70fb254c23c" (UID: "50bca488-c675-4109-84d3-e70fb254c23c"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.455212 4760 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.455564 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.455576 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tp2dx\" (UniqueName: \"kubernetes.io/projected/50bca488-c675-4109-84d3-e70fb254c23c-kube-api-access-tp2dx\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.455590 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50bca488-c675-4109-84d3-e70fb254c23c-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:16 crc kubenswrapper[4760]: I1124 17:22:16.455604 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50bca488-c675-4109-84d3-e70fb254c23c-logs\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.218199 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.221267 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"61870fa4-1b0e-450c-a2a8-06d3ba20cd3e","Type":"ContainerStarted","Data":"b776834f088ee76f2b6b0b6886b8d74c5553213eb28a02ad0eb0fa52bbd6bf74"}
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.221473 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.245295 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.245273686 podStartE2EDuration="2.245273686s" podCreationTimestamp="2025-11-24 17:22:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:17.241469647 +0000 UTC m=+1132.564351217" watchObservedRunningTime="2025-11-24 17:22:17.245273686 +0000 UTC m=+1132.568155236"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.260385 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.268133 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.287744 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:22:17 crc kubenswrapper[4760]: E1124 17:22:17.288188 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50bca488-c675-4109-84d3-e70fb254c23c" containerName="nova-metadata-metadata"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.288205 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="50bca488-c675-4109-84d3-e70fb254c23c" containerName="nova-metadata-metadata"
Nov 24 17:22:17 crc kubenswrapper[4760]: E1124 17:22:17.288218 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50bca488-c675-4109-84d3-e70fb254c23c" containerName="nova-metadata-log"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.288224 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="50bca488-c675-4109-84d3-e70fb254c23c" containerName="nova-metadata-log"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.288389 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="50bca488-c675-4109-84d3-e70fb254c23c" containerName="nova-metadata-metadata"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.288410 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="50bca488-c675-4109-84d3-e70fb254c23c" containerName="nova-metadata-log"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.289346 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.291240 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.298732 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.300358 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.371455 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9w5vt\" (UniqueName: \"kubernetes.io/projected/4b6228d3-58e5-48e3-b881-9381015853b3-kube-api-access-9w5vt\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.371513 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.371950 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-config-data\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.372102 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.372139 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b6228d3-58e5-48e3-b881-9381015853b3-logs\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.474885 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-config-data\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.474957 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.474989 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b6228d3-58e5-48e3-b881-9381015853b3-logs\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.475048 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9w5vt\" (UniqueName: \"kubernetes.io/projected/4b6228d3-58e5-48e3-b881-9381015853b3-kube-api-access-9w5vt\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.475086 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.475603 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b6228d3-58e5-48e3-b881-9381015853b3-logs\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.482391 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.489602 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50bca488-c675-4109-84d3-e70fb254c23c" path="/var/lib/kubelet/pods/50bca488-c675-4109-84d3-e70fb254c23c/volumes"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.492192 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9w5vt\" (UniqueName: \"kubernetes.io/projected/4b6228d3-58e5-48e3-b881-9381015853b3-kube-api-access-9w5vt\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.495900 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-config-data\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.496683 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") " pod="openstack/nova-metadata-0"
Nov 24 17:22:17 crc kubenswrapper[4760]: I1124 17:22:17.625493 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 24 17:22:18 crc kubenswrapper[4760]: I1124 17:22:18.118908 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:22:18 crc kubenswrapper[4760]: I1124 17:22:18.227423 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b6228d3-58e5-48e3-b881-9381015853b3","Type":"ContainerStarted","Data":"95b650c14e2292bc9365b56e5beea9fa6c447288eda2d88d307ecf8ca4bd6128"}
Nov 24 17:22:18 crc kubenswrapper[4760]: E1124 17:22:18.229764 4760 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 24 17:22:18 crc kubenswrapper[4760]: E1124 17:22:18.231060 4760 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 24 17:22:18 crc kubenswrapper[4760]: E1124 17:22:18.232122 4760 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 24 17:22:18 crc kubenswrapper[4760]: E1124 17:22:18.232150 4760 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="586a40e1-bd4d-46ed-ba0a-8c0f83a52996" containerName="nova-scheduler-scheduler"
Nov 24 17:22:19 crc kubenswrapper[4760]: I1124 17:22:19.239948 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b6228d3-58e5-48e3-b881-9381015853b3","Type":"ContainerStarted","Data":"5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94"}
Nov 24 17:22:19 crc kubenswrapper[4760]: I1124 17:22:19.240305 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b6228d3-58e5-48e3-b881-9381015853b3","Type":"ContainerStarted","Data":"9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812"}
Nov 24 17:22:19 crc kubenswrapper[4760]: I1124 17:22:19.268663 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.268645244 podStartE2EDuration="2.268645244s" podCreationTimestamp="2025-11-24 17:22:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:19.266728229 +0000 UTC m=+1134.589609799" watchObservedRunningTime="2025-11-24 17:22:19.268645244 +0000 UTC m=+1134.591526784"
Nov 24 17:22:19 crc kubenswrapper[4760]: I1124 17:22:19.885950 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 24 17:22:19 crc kubenswrapper[4760]: I1124 17:22:19.915805 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-config-data\") pod \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") "
Nov 24 17:22:19 crc kubenswrapper[4760]: I1124 17:22:19.915958 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8xrc\" (UniqueName: \"kubernetes.io/projected/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-kube-api-access-q8xrc\") pod \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") "
Nov 24 17:22:19 crc kubenswrapper[4760]: I1124 17:22:19.916033 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-combined-ca-bundle\") pod \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\" (UID: \"586a40e1-bd4d-46ed-ba0a-8c0f83a52996\") "
Nov 24 17:22:19 crc kubenswrapper[4760]: I1124 17:22:19.945640 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-kube-api-access-q8xrc" (OuterVolumeSpecName: "kube-api-access-q8xrc") pod "586a40e1-bd4d-46ed-ba0a-8c0f83a52996" (UID: "586a40e1-bd4d-46ed-ba0a-8c0f83a52996"). InnerVolumeSpecName "kube-api-access-q8xrc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:22:19 crc kubenswrapper[4760]: I1124 17:22:19.957611 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "586a40e1-bd4d-46ed-ba0a-8c0f83a52996" (UID: "586a40e1-bd4d-46ed-ba0a-8c0f83a52996"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:19 crc kubenswrapper[4760]: I1124 17:22:19.982437 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-config-data" (OuterVolumeSpecName: "config-data") pod "586a40e1-bd4d-46ed-ba0a-8c0f83a52996" (UID: "586a40e1-bd4d-46ed-ba0a-8c0f83a52996"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.018213 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8xrc\" (UniqueName: \"kubernetes.io/projected/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-kube-api-access-q8xrc\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.018256 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.018284 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586a40e1-bd4d-46ed-ba0a-8c0f83a52996-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.258671 4760 generic.go:334] "Generic (PLEG): container finished" podID="586a40e1-bd4d-46ed-ba0a-8c0f83a52996" containerID="84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91" exitCode=0
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.258753 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.258767 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"586a40e1-bd4d-46ed-ba0a-8c0f83a52996","Type":"ContainerDied","Data":"84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91"}
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.258812 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"586a40e1-bd4d-46ed-ba0a-8c0f83a52996","Type":"ContainerDied","Data":"a8cb511c4227c1af906a690df832340f45cfcde3570f902096f6f9cb19cdc061"}
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.258833 4760 scope.go:117] "RemoveContainer" containerID="84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91"
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.266275 4760 generic.go:334] "Generic (PLEG): container finished" podID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerID="a5d94d2f056d8ab54ea8bfa3813d0c5e2cefe5d64573819f43b77eac24a33b5e" exitCode=0
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.267112 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"226a3269-3db4-471f-947b-44f0d9e1f5af","Type":"ContainerDied","Data":"a5d94d2f056d8ab54ea8bfa3813d0c5e2cefe5d64573819f43b77eac24a33b5e"}
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.319197 4760 scope.go:117] "RemoveContainer" containerID="84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91"
Nov 24 17:22:20 crc kubenswrapper[4760]: E1124 17:22:20.321460 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91\": container with ID starting with 84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91 not found: ID does not exist" containerID="84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91"
Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.321492 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91"} err="failed to get container status
\"84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91\": rpc error: code = NotFound desc = could not find container \"84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91\": container with ID starting with 84ee4791a99596c62129b73a8afef4fb2ee8f19ea83e03e8768e97c6a60c5f91 not found: ID does not exist" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.321524 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.337196 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.351472 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:22:20 crc kubenswrapper[4760]: E1124 17:22:20.352038 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="586a40e1-bd4d-46ed-ba0a-8c0f83a52996" containerName="nova-scheduler-scheduler" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.352064 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="586a40e1-bd4d-46ed-ba0a-8c0f83a52996" containerName="nova-scheduler-scheduler" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.352309 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="586a40e1-bd4d-46ed-ba0a-8c0f83a52996" containerName="nova-scheduler-scheduler" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.353124 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.355422 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.368980 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.405302 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.436193 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-config-data\") pod \"226a3269-3db4-471f-947b-44f0d9e1f5af\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.436255 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2hm9\" (UniqueName: \"kubernetes.io/projected/226a3269-3db4-471f-947b-44f0d9e1f5af-kube-api-access-w2hm9\") pod \"226a3269-3db4-471f-947b-44f0d9e1f5af\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.436298 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/226a3269-3db4-471f-947b-44f0d9e1f5af-logs\") pod \"226a3269-3db4-471f-947b-44f0d9e1f5af\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.436332 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-combined-ca-bundle\") pod \"226a3269-3db4-471f-947b-44f0d9e1f5af\" (UID: \"226a3269-3db4-471f-947b-44f0d9e1f5af\") " Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.436893 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.436955 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-config-data\") pod \"nova-scheduler-0\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.437037 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w5cr\" (UniqueName: \"kubernetes.io/projected/57ccb0e4-90b5-4029-b30f-4eb9973fa389-kube-api-access-6w5cr\") pod \"nova-scheduler-0\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.437423 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/226a3269-3db4-471f-947b-44f0d9e1f5af-logs" (OuterVolumeSpecName: "logs") pod "226a3269-3db4-471f-947b-44f0d9e1f5af" (UID: "226a3269-3db4-471f-947b-44f0d9e1f5af"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.440293 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/226a3269-3db4-471f-947b-44f0d9e1f5af-kube-api-access-w2hm9" (OuterVolumeSpecName: "kube-api-access-w2hm9") pod "226a3269-3db4-471f-947b-44f0d9e1f5af" (UID: "226a3269-3db4-471f-947b-44f0d9e1f5af"). InnerVolumeSpecName "kube-api-access-w2hm9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.460621 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-config-data" (OuterVolumeSpecName: "config-data") pod "226a3269-3db4-471f-947b-44f0d9e1f5af" (UID: "226a3269-3db4-471f-947b-44f0d9e1f5af"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.468381 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "226a3269-3db4-471f-947b-44f0d9e1f5af" (UID: "226a3269-3db4-471f-947b-44f0d9e1f5af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.538773 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.539335 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-config-data\") pod \"nova-scheduler-0\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.539517 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w5cr\" (UniqueName: \"kubernetes.io/projected/57ccb0e4-90b5-4029-b30f-4eb9973fa389-kube-api-access-6w5cr\") pod \"nova-scheduler-0\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.539949 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.539969 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2hm9\" (UniqueName: \"kubernetes.io/projected/226a3269-3db4-471f-947b-44f0d9e1f5af-kube-api-access-w2hm9\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.539982 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/226a3269-3db4-471f-947b-44f0d9e1f5af-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.539994 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a3269-3db4-471f-947b-44f0d9e1f5af-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.544578 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-config-data\") pod \"nova-scheduler-0\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.549652 4760 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.556097 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w5cr\" (UniqueName: \"kubernetes.io/projected/57ccb0e4-90b5-4029-b30f-4eb9973fa389-kube-api-access-6w5cr\") pod \"nova-scheduler-0\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " pod="openstack/nova-scheduler-0" Nov 24 17:22:20 crc kubenswrapper[4760]: I1124 17:22:20.718291 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 17:22:21 crc kubenswrapper[4760]: W1124 17:22:21.153575 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57ccb0e4_90b5_4029_b30f_4eb9973fa389.slice/crio-1278d351fd84805ea4b0b99a909f8eaa4f3682661b3a58b83230a5963a195201 WatchSource:0}: Error finding container 1278d351fd84805ea4b0b99a909f8eaa4f3682661b3a58b83230a5963a195201: Status 404 returned error can't find the container with id 1278d351fd84805ea4b0b99a909f8eaa4f3682661b3a58b83230a5963a195201 Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.158183 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.282332 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"57ccb0e4-90b5-4029-b30f-4eb9973fa389","Type":"ContainerStarted","Data":"1278d351fd84805ea4b0b99a909f8eaa4f3682661b3a58b83230a5963a195201"} Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.285314 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"226a3269-3db4-471f-947b-44f0d9e1f5af","Type":"ContainerDied","Data":"2f4f68e47f10a1d39f768cc0f94d1f7ef3762dde7e216cc9d151e2aec49750f3"} Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.285354 4760 scope.go:117] "RemoveContainer" containerID="a5d94d2f056d8ab54ea8bfa3813d0c5e2cefe5d64573819f43b77eac24a33b5e" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.285461 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.331275 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.341334 4760 scope.go:117] "RemoveContainer" containerID="422fddc4c0bc32cc8972758d507d51d1a5b22534d1848b583ffc4e7ef3089357" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.344654 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.357769 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:21 crc kubenswrapper[4760]: E1124 17:22:21.358385 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerName="nova-api-api" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.358418 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerName="nova-api-api" Nov 24 17:22:21 crc kubenswrapper[4760]: E1124 17:22:21.358479 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerName="nova-api-log" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.358492 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerName="nova-api-log" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.358779 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerName="nova-api-api" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.358822 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" containerName="nova-api-log" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.360429 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.363290 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.370206 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.456606 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.456842 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kklmp\" (UniqueName: \"kubernetes.io/projected/036d6371-67d8-404b-94dc-9d001f0ba6d5-kube-api-access-kklmp\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.456918 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/036d6371-67d8-404b-94dc-9d001f0ba6d5-logs\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.457026 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-config-data\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.476283 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="226a3269-3db4-471f-947b-44f0d9e1f5af" path="/var/lib/kubelet/pods/226a3269-3db4-471f-947b-44f0d9e1f5af/volumes" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.476854 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="586a40e1-bd4d-46ed-ba0a-8c0f83a52996" path="/var/lib/kubelet/pods/586a40e1-bd4d-46ed-ba0a-8c0f83a52996/volumes" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.559344 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.559525 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kklmp\" (UniqueName: \"kubernetes.io/projected/036d6371-67d8-404b-94dc-9d001f0ba6d5-kube-api-access-kklmp\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.559597 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/036d6371-67d8-404b-94dc-9d001f0ba6d5-logs\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.559650 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-config-data\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.560149 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/036d6371-67d8-404b-94dc-9d001f0ba6d5-logs\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.565085 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.565353 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-config-data\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.590704 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kklmp\" (UniqueName: \"kubernetes.io/projected/036d6371-67d8-404b-94dc-9d001f0ba6d5-kube-api-access-kklmp\") pod \"nova-api-0\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " pod="openstack/nova-api-0" Nov 24 17:22:21 crc kubenswrapper[4760]: I1124 17:22:21.680923 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:22:22 crc kubenswrapper[4760]: I1124 17:22:22.141853 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:22 crc kubenswrapper[4760]: W1124 17:22:22.145576 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod036d6371_67d8_404b_94dc_9d001f0ba6d5.slice/crio-1a6bb1e8a28ee6033f4aaa77961bb8e9362b56618b1cdaf015108b67cf7649dd WatchSource:0}: Error finding container 1a6bb1e8a28ee6033f4aaa77961bb8e9362b56618b1cdaf015108b67cf7649dd: Status 404 returned error can't find the container with id 1a6bb1e8a28ee6033f4aaa77961bb8e9362b56618b1cdaf015108b67cf7649dd Nov 24 17:22:22 crc kubenswrapper[4760]: I1124 17:22:22.304223 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"036d6371-67d8-404b-94dc-9d001f0ba6d5","Type":"ContainerStarted","Data":"1a6bb1e8a28ee6033f4aaa77961bb8e9362b56618b1cdaf015108b67cf7649dd"} Nov 24 17:22:22 crc kubenswrapper[4760]: I1124 17:22:22.306644 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"57ccb0e4-90b5-4029-b30f-4eb9973fa389","Type":"ContainerStarted","Data":"4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584"} Nov 24 17:22:22 crc kubenswrapper[4760]: I1124 17:22:22.333860 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.333844083 podStartE2EDuration="2.333844083s" podCreationTimestamp="2025-11-24 17:22:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:22.327577153 +0000 UTC m=+1137.650458703" watchObservedRunningTime="2025-11-24 17:22:22.333844083 
+0000 UTC m=+1137.656725633" Nov 24 17:22:22 crc kubenswrapper[4760]: I1124 17:22:22.626112 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 17:22:22 crc kubenswrapper[4760]: I1124 17:22:22.626170 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 17:22:23 crc kubenswrapper[4760]: I1124 17:22:23.321941 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"036d6371-67d8-404b-94dc-9d001f0ba6d5","Type":"ContainerStarted","Data":"bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952"} Nov 24 17:22:23 crc kubenswrapper[4760]: I1124 17:22:23.322298 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"036d6371-67d8-404b-94dc-9d001f0ba6d5","Type":"ContainerStarted","Data":"5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55"} Nov 24 17:22:23 crc kubenswrapper[4760]: I1124 17:22:23.348335 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.348307021 podStartE2EDuration="2.348307021s" podCreationTimestamp="2025-11-24 17:22:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:23.344215454 +0000 UTC m=+1138.667097004" watchObservedRunningTime="2025-11-24 17:22:23.348307021 +0000 UTC m=+1138.671188611" Nov 24 17:22:24 crc kubenswrapper[4760]: E1124 17:22:24.782473 4760 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb140b79a_3f9b_4909_bf34_2be905ddf6b0.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3003350b_62f1_4eb7_b044_bc0e8b007ef5.slice\": RecentStats: unable to find data in memory cache]" Nov 24 17:22:25 crc kubenswrapper[4760]: I1124 17:22:25.647426 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 24 17:22:25 crc kubenswrapper[4760]: I1124 17:22:25.718834 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 24 17:22:27 crc kubenswrapper[4760]: I1124 17:22:27.098856 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 24 17:22:27 crc kubenswrapper[4760]: I1124 17:22:27.626194 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 24 17:22:27 crc kubenswrapper[4760]: I1124 17:22:27.626254 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 24 17:22:28 crc kubenswrapper[4760]: I1124 17:22:28.646261 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.194:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 17:22:28 crc kubenswrapper[4760]: I1124 17:22:28.646264 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.194:8775/\": context deadline 
exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 17:22:30 crc kubenswrapper[4760]: I1124 17:22:30.474294 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 17:22:30 crc kubenswrapper[4760]: I1124 17:22:30.474540 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="54671374-7f83-4a6d-98a2-c5371e84a5f7" containerName="kube-state-metrics" containerID="cri-o://816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc" gracePeriod=30 Nov 24 17:22:30 crc kubenswrapper[4760]: I1124 17:22:30.718478 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 24 17:22:30 crc kubenswrapper[4760]: I1124 17:22:30.754672 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.024122 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.142545 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fm5lg\" (UniqueName: \"kubernetes.io/projected/54671374-7f83-4a6d-98a2-c5371e84a5f7-kube-api-access-fm5lg\") pod \"54671374-7f83-4a6d-98a2-c5371e84a5f7\" (UID: \"54671374-7f83-4a6d-98a2-c5371e84a5f7\") " Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.147403 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54671374-7f83-4a6d-98a2-c5371e84a5f7-kube-api-access-fm5lg" (OuterVolumeSpecName: "kube-api-access-fm5lg") pod "54671374-7f83-4a6d-98a2-c5371e84a5f7" (UID: "54671374-7f83-4a6d-98a2-c5371e84a5f7"). InnerVolumeSpecName "kube-api-access-fm5lg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.244360 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fm5lg\" (UniqueName: \"kubernetes.io/projected/54671374-7f83-4a6d-98a2-c5371e84a5f7-kube-api-access-fm5lg\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.401787 4760 generic.go:334] "Generic (PLEG): container finished" podID="54671374-7f83-4a6d-98a2-c5371e84a5f7" containerID="816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc" exitCode=2 Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.401845 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.401847 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"54671374-7f83-4a6d-98a2-c5371e84a5f7","Type":"ContainerDied","Data":"816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc"} Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.401906 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"54671374-7f83-4a6d-98a2-c5371e84a5f7","Type":"ContainerDied","Data":"c38a2a8a988e4ce5dda78ac28a7b9863b28bf2ad9d50e92ee4fdd624dfce9018"} Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.401930 4760 scope.go:117] "RemoveContainer" containerID="816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.430604 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.434420 4760 scope.go:117] "RemoveContainer" containerID="816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc" Nov 24 17:22:31 crc kubenswrapper[4760]: E1124 17:22:31.434800 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc\": container with ID starting with 816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc not found: ID does not exist" containerID="816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.434827 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc"} err="failed to get container status \"816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc\": rpc error: code = NotFound desc = could not find container \"816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc\": container with ID starting with 816fb8a603d497709a935f36f2b22d32a113c67b0e154cdc9d8092493daeb4fc not found: ID does not exist" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.443491 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.451920 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.460129 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 17:22:31 crc kubenswrapper[4760]: E1124 17:22:31.460541 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54671374-7f83-4a6d-98a2-c5371e84a5f7" containerName="kube-state-metrics" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.460559 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="54671374-7f83-4a6d-98a2-c5371e84a5f7" containerName="kube-state-metrics" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.460751 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="54671374-7f83-4a6d-98a2-c5371e84a5f7" containerName="kube-state-metrics" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.461388 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.463419 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.463787 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.483413 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54671374-7f83-4a6d-98a2-c5371e84a5f7" path="/var/lib/kubelet/pods/54671374-7f83-4a6d-98a2-c5371e84a5f7/volumes" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.489479 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.649376 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/33972ca1-3846-487a-a8b0-fb67093b1a6d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.650036 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/33972ca1-3846-487a-a8b0-fb67093b1a6d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.650178 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33972ca1-3846-487a-a8b0-fb67093b1a6d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.650285 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdb2c\" (UniqueName: \"kubernetes.io/projected/33972ca1-3846-487a-a8b0-fb67093b1a6d-kube-api-access-mdb2c\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.682090 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.682162 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.752311 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33972ca1-3846-487a-a8b0-fb67093b1a6d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.752364 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdb2c\" (UniqueName: \"kubernetes.io/projected/33972ca1-3846-487a-a8b0-fb67093b1a6d-kube-api-access-mdb2c\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 
24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.752454 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/33972ca1-3846-487a-a8b0-fb67093b1a6d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.752532 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/33972ca1-3846-487a-a8b0-fb67093b1a6d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.759238 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33972ca1-3846-487a-a8b0-fb67093b1a6d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.762526 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/33972ca1-3846-487a-a8b0-fb67093b1a6d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.770149 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/33972ca1-3846-487a-a8b0-fb67093b1a6d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.774687 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdb2c\" (UniqueName: \"kubernetes.io/projected/33972ca1-3846-487a-a8b0-fb67093b1a6d-kube-api-access-mdb2c\") pod \"kube-state-metrics-0\" (UID: \"33972ca1-3846-487a-a8b0-fb67093b1a6d\") " pod="openstack/kube-state-metrics-0" Nov 24 17:22:31 crc kubenswrapper[4760]: I1124 17:22:31.786802 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 17:22:32 crc kubenswrapper[4760]: I1124 17:22:32.322438 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 17:22:32 crc kubenswrapper[4760]: I1124 17:22:32.413670 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"33972ca1-3846-487a-a8b0-fb67093b1a6d","Type":"ContainerStarted","Data":"c9bf38bd1469d22c458fd63203f916d9e9e62be1f082b2f8ccdb77766aed51c5"} Nov 24 17:22:32 crc kubenswrapper[4760]: I1124 17:22:32.500378 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:32 crc kubenswrapper[4760]: I1124 17:22:32.504270 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="ceilometer-central-agent" containerID="cri-o://2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf" gracePeriod=30 Nov 24 17:22:32 crc kubenswrapper[4760]: I1124 17:22:32.504489 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="proxy-httpd" containerID="cri-o://4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df" gracePeriod=30 Nov 24 17:22:32 crc kubenswrapper[4760]: I1124 17:22:32.504725 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="ceilometer-notification-agent" containerID="cri-o://f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b" gracePeriod=30 Nov 24 17:22:32 crc kubenswrapper[4760]: I1124 17:22:32.504708 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="sg-core" containerID="cri-o://909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1" gracePeriod=30 Nov 24 17:22:32 crc kubenswrapper[4760]: I1124 17:22:32.764319 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 17:22:32 crc kubenswrapper[4760]: I1124 17:22:32.764357 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 17:22:33 crc kubenswrapper[4760]: I1124 17:22:33.424191 4760 generic.go:334] "Generic (PLEG): container finished" podID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerID="4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df" exitCode=0 Nov 24 17:22:33 crc kubenswrapper[4760]: I1124 17:22:33.424613 4760 generic.go:334] "Generic (PLEG): container finished" podID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerID="909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1" exitCode=2 Nov 24 17:22:33 crc kubenswrapper[4760]: I1124 17:22:33.424629 4760 generic.go:334] "Generic (PLEG): container finished" podID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerID="2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf" 
exitCode=0 Nov 24 17:22:33 crc kubenswrapper[4760]: I1124 17:22:33.424270 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69c66cac-178f-40ee-99c8-fe71ced5126c","Type":"ContainerDied","Data":"4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df"} Nov 24 17:22:33 crc kubenswrapper[4760]: I1124 17:22:33.424715 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69c66cac-178f-40ee-99c8-fe71ced5126c","Type":"ContainerDied","Data":"909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1"} Nov 24 17:22:33 crc kubenswrapper[4760]: I1124 17:22:33.424729 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69c66cac-178f-40ee-99c8-fe71ced5126c","Type":"ContainerDied","Data":"2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf"} Nov 24 17:22:33 crc kubenswrapper[4760]: I1124 17:22:33.426682 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"33972ca1-3846-487a-a8b0-fb67093b1a6d","Type":"ContainerStarted","Data":"d481c2bfff41be75e73931b1c76a53aebd5ff875ce3943f5ccedd3db0a2aec45"} Nov 24 17:22:33 crc kubenswrapper[4760]: I1124 17:22:33.426840 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 24 17:22:33 crc kubenswrapper[4760]: I1124 17:22:33.447436 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.005129903 podStartE2EDuration="2.447416446s" podCreationTimestamp="2025-11-24 17:22:31 +0000 UTC" firstStartedPulling="2025-11-24 17:22:32.32797992 +0000 UTC m=+1147.650861470" lastFinishedPulling="2025-11-24 17:22:32.770266463 +0000 UTC m=+1148.093148013" observedRunningTime="2025-11-24 17:22:33.443311399 +0000 UTC m=+1148.766192959" watchObservedRunningTime="2025-11-24 17:22:33.447416446 +0000 UTC m=+1148.770297996" Nov 24 17:22:35 crc kubenswrapper[4760]: I1124 17:22:35.642897 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:22:35 crc kubenswrapper[4760]: I1124 17:22:35.643515 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:22:35 crc kubenswrapper[4760]: I1124 17:22:35.914299 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.048870 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-run-httpd\") pod \"69c66cac-178f-40ee-99c8-fe71ced5126c\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.048922 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-scripts\") pod \"69c66cac-178f-40ee-99c8-fe71ced5126c\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.048994 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8jzr\" (UniqueName: \"kubernetes.io/projected/69c66cac-178f-40ee-99c8-fe71ced5126c-kube-api-access-h8jzr\") pod \"69c66cac-178f-40ee-99c8-fe71ced5126c\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.049077 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-sg-core-conf-yaml\") pod \"69c66cac-178f-40ee-99c8-fe71ced5126c\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.049238 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-combined-ca-bundle\") pod \"69c66cac-178f-40ee-99c8-fe71ced5126c\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.049278 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-log-httpd\") pod \"69c66cac-178f-40ee-99c8-fe71ced5126c\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.049319 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-config-data\") pod \"69c66cac-178f-40ee-99c8-fe71ced5126c\" (UID: \"69c66cac-178f-40ee-99c8-fe71ced5126c\") " Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.049907 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "69c66cac-178f-40ee-99c8-fe71ced5126c" (UID: "69c66cac-178f-40ee-99c8-fe71ced5126c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.050230 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "69c66cac-178f-40ee-99c8-fe71ced5126c" (UID: "69c66cac-178f-40ee-99c8-fe71ced5126c"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.055233 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-scripts" (OuterVolumeSpecName: "scripts") pod "69c66cac-178f-40ee-99c8-fe71ced5126c" (UID: "69c66cac-178f-40ee-99c8-fe71ced5126c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.055307 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69c66cac-178f-40ee-99c8-fe71ced5126c-kube-api-access-h8jzr" (OuterVolumeSpecName: "kube-api-access-h8jzr") pod "69c66cac-178f-40ee-99c8-fe71ced5126c" (UID: "69c66cac-178f-40ee-99c8-fe71ced5126c"). InnerVolumeSpecName "kube-api-access-h8jzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.078136 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "69c66cac-178f-40ee-99c8-fe71ced5126c" (UID: "69c66cac-178f-40ee-99c8-fe71ced5126c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.122268 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69c66cac-178f-40ee-99c8-fe71ced5126c" (UID: "69c66cac-178f-40ee-99c8-fe71ced5126c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.150814 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.150839 4760 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.150851 4760 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69c66cac-178f-40ee-99c8-fe71ced5126c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.150858 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.150868 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8jzr\" (UniqueName: \"kubernetes.io/projected/69c66cac-178f-40ee-99c8-fe71ced5126c-kube-api-access-h8jzr\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.150877 4760 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.169446 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-config-data" (OuterVolumeSpecName: "config-data") pod "69c66cac-178f-40ee-99c8-fe71ced5126c" (UID: "69c66cac-178f-40ee-99c8-fe71ced5126c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.252323 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69c66cac-178f-40ee-99c8-fe71ced5126c-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.455705 4760 generic.go:334] "Generic (PLEG): container finished" podID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerID="f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b" exitCode=0 Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.455736 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69c66cac-178f-40ee-99c8-fe71ced5126c","Type":"ContainerDied","Data":"f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b"} Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.455755 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.455775 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69c66cac-178f-40ee-99c8-fe71ced5126c","Type":"ContainerDied","Data":"7a0eef93cc92fd4bf636007c627a6e8257dbf8715fcd48473b3901e69afa6eb7"} Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.455793 4760 scope.go:117] "RemoveContainer" containerID="4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.473275 4760 scope.go:117] "RemoveContainer" containerID="909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.493434 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.498525 4760 scope.go:117] "RemoveContainer" containerID="f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.506288 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.519350 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:36 crc kubenswrapper[4760]: E1124 17:22:36.519978 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="ceilometer-notification-agent" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.519995 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="ceilometer-notification-agent" Nov 24 17:22:36 crc kubenswrapper[4760]: E1124 17:22:36.520046 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="sg-core" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.520057 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="sg-core" Nov 24 17:22:36 crc kubenswrapper[4760]: E1124 17:22:36.520113 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" 
containerName="proxy-httpd" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.520126 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="proxy-httpd" Nov 24 17:22:36 crc kubenswrapper[4760]: E1124 17:22:36.520617 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="ceilometer-central-agent" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.520630 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="ceilometer-central-agent" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.520847 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="ceilometer-central-agent" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.520869 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="proxy-httpd" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.520888 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="sg-core" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.520897 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" containerName="ceilometer-notification-agent" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.525592 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.527358 4760 scope.go:117] "RemoveContainer" containerID="2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.528341 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.528744 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.528823 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.536372 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.557837 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.557887 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-log-httpd\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.557955 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-scripts\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " 
pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.558116 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.558165 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-config-data\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.558190 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.558239 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdm8v\" (UniqueName: \"kubernetes.io/projected/7af96892-b69b-4456-8634-741ec91cda2e-kube-api-access-qdm8v\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.558256 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-run-httpd\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.570314 4760 scope.go:117] "RemoveContainer" containerID="4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df" Nov 24 17:22:36 crc kubenswrapper[4760]: E1124 17:22:36.570594 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df\": container with ID starting with 4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df not found: ID does not exist" containerID="4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.570653 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df"} err="failed to get container status \"4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df\": rpc error: code = NotFound desc = could not find container \"4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df\": container with ID starting with 4c21e64bc61e91e5ae49f841b40c945984206ccc3bc65af4310fb116ce8817df not found: ID does not exist" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.570674 4760 scope.go:117] "RemoveContainer" containerID="909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1" Nov 24 17:22:36 crc kubenswrapper[4760]: E1124 17:22:36.570858 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1\": container with ID starting with 909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1 not found: ID does not exist" containerID="909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.570877 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1"} err="failed to get container status \"909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1\": rpc error: code = NotFound desc = could not find container \"909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1\": container with ID starting with 909aff84b294e96bfd3a3168828b0739688594df554609b35482c1452394dbc1 not found: ID does not exist" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.570891 4760 scope.go:117] "RemoveContainer" containerID="f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b" Nov 24 17:22:36 crc kubenswrapper[4760]: E1124 17:22:36.571099 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b\": container with ID starting with f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b not found: ID does not exist" containerID="f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.571116 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b"} err="failed to get container status \"f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b\": rpc error: code = NotFound desc = could not find container \"f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b\": container with ID starting with f1028b9996327335eeb9363fc8a1d83d52f9ea67e7686fb1c37feb133de3340b not found: ID does not exist" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.571130 4760 scope.go:117] "RemoveContainer" containerID="2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf" Nov 24 17:22:36 crc kubenswrapper[4760]: E1124 17:22:36.571285 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf\": container with ID starting with 2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf not found: ID does not exist" containerID="2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.571307 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf"} err="failed to get container status \"2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf\": rpc error: code = NotFound desc = could not find container \"2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf\": container with ID starting with 2962e3c889d0e12f4aa0a637f9c42e1318dff9973926cdc2547c78b34ee20ddf not found: ID does not exist" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.660186 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-config-data\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.660255 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.660288 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdm8v\" (UniqueName: \"kubernetes.io/projected/7af96892-b69b-4456-8634-741ec91cda2e-kube-api-access-qdm8v\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.660304 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-run-httpd\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.660349 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.660370 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-log-httpd\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.660425 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-scripts\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.660494 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.661462 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-run-httpd\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.661603 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-log-httpd\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.665504 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.666346 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-config-data\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.672576 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.673113 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-scripts\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.676244 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.677964 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdm8v\" (UniqueName: \"kubernetes.io/projected/7af96892-b69b-4456-8634-741ec91cda2e-kube-api-access-qdm8v\") pod \"ceilometer-0\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " pod="openstack/ceilometer-0" Nov 24 17:22:36 crc kubenswrapper[4760]: I1124 17:22:36.870871 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:22:37 crc kubenswrapper[4760]: I1124 17:22:37.351930 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:37 crc kubenswrapper[4760]: W1124 17:22:37.356189 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7af96892_b69b_4456_8634_741ec91cda2e.slice/crio-093c25a5bbb70ab58a7de610a696ada4a3f9849877e425ebd0c7594c3ba4ff0b WatchSource:0}: Error finding container 093c25a5bbb70ab58a7de610a696ada4a3f9849877e425ebd0c7594c3ba4ff0b: Status 404 returned error can't find the container with id 093c25a5bbb70ab58a7de610a696ada4a3f9849877e425ebd0c7594c3ba4ff0b Nov 24 17:22:37 crc kubenswrapper[4760]: I1124 17:22:37.478834 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69c66cac-178f-40ee-99c8-fe71ced5126c" path="/var/lib/kubelet/pods/69c66cac-178f-40ee-99c8-fe71ced5126c/volumes" Nov 24 17:22:37 crc kubenswrapper[4760]: I1124 17:22:37.479910 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7af96892-b69b-4456-8634-741ec91cda2e","Type":"ContainerStarted","Data":"093c25a5bbb70ab58a7de610a696ada4a3f9849877e425ebd0c7594c3ba4ff0b"} Nov 24 17:22:37 crc kubenswrapper[4760]: I1124 17:22:37.632705 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 17:22:37 crc kubenswrapper[4760]: I1124 17:22:37.633805 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 17:22:37 crc kubenswrapper[4760]: I1124 17:22:37.637731 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 17:22:38 crc kubenswrapper[4760]: I1124 17:22:38.488712 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7af96892-b69b-4456-8634-741ec91cda2e","Type":"ContainerStarted","Data":"72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6"} Nov 24 17:22:38 crc kubenswrapper[4760]: I1124 17:22:38.493092 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.380130 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.499542 4760 generic.go:334] "Generic (PLEG): container finished" podID="9b4ca074-b020-4361-8308-f09a09c1bcff" containerID="777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4" exitCode=137 Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.499588 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.499627 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9b4ca074-b020-4361-8308-f09a09c1bcff","Type":"ContainerDied","Data":"777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4"} Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.499687 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9b4ca074-b020-4361-8308-f09a09c1bcff","Type":"ContainerDied","Data":"b425d1d4f6a8798fde81cfa30a7306b0739883bb97b0b4114ac194a78cc49dff"} Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.499783 4760 scope.go:117] "RemoveContainer" containerID="777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4" Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.505979 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7af96892-b69b-4456-8634-741ec91cda2e","Type":"ContainerStarted","Data":"6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258"} Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.522537 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-config-data\") pod \"9b4ca074-b020-4361-8308-f09a09c1bcff\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.522678 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-combined-ca-bundle\") pod \"9b4ca074-b020-4361-8308-f09a09c1bcff\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.522827 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zvlr\" (UniqueName: \"kubernetes.io/projected/9b4ca074-b020-4361-8308-f09a09c1bcff-kube-api-access-9zvlr\") pod \"9b4ca074-b020-4361-8308-f09a09c1bcff\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.528683 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b4ca074-b020-4361-8308-f09a09c1bcff-kube-api-access-9zvlr" (OuterVolumeSpecName: "kube-api-access-9zvlr") pod "9b4ca074-b020-4361-8308-f09a09c1bcff" (UID: "9b4ca074-b020-4361-8308-f09a09c1bcff"). InnerVolumeSpecName "kube-api-access-9zvlr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.528990 4760 scope.go:117] "RemoveContainer" containerID="777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4" Nov 24 17:22:39 crc kubenswrapper[4760]: E1124 17:22:39.529562 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4\": container with ID starting with 777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4 not found: ID does not exist" containerID="777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4" Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.529615 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4"} err="failed to get container status \"777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4\": rpc error: code = NotFound desc = could not find container \"777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4\": container with ID starting with 777ced216e8c6d8c3ae1ab0c15efdbb64bb046dbfc6379e66541c945dd8085b4 not found: ID does not exist" Nov 24 17:22:39 crc kubenswrapper[4760]: E1124 17:22:39.549544 4760 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-combined-ca-bundle podName:9b4ca074-b020-4361-8308-f09a09c1bcff nodeName:}" failed. No retries permitted until 2025-11-24 17:22:40.049513983 +0000 UTC m=+1155.372395533 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-combined-ca-bundle") pod "9b4ca074-b020-4361-8308-f09a09c1bcff" (UID: "9b4ca074-b020-4361-8308-f09a09c1bcff") : error deleting /var/lib/kubelet/pods/9b4ca074-b020-4361-8308-f09a09c1bcff/volume-subpaths: remove /var/lib/kubelet/pods/9b4ca074-b020-4361-8308-f09a09c1bcff/volume-subpaths: no such file or directory Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.552216 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-config-data" (OuterVolumeSpecName: "config-data") pod "9b4ca074-b020-4361-8308-f09a09c1bcff" (UID: "9b4ca074-b020-4361-8308-f09a09c1bcff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.625262 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zvlr\" (UniqueName: \"kubernetes.io/projected/9b4ca074-b020-4361-8308-f09a09c1bcff-kube-api-access-9zvlr\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:39 crc kubenswrapper[4760]: I1124 17:22:39.625307 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.133444 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-combined-ca-bundle\") pod \"9b4ca074-b020-4361-8308-f09a09c1bcff\" (UID: \"9b4ca074-b020-4361-8308-f09a09c1bcff\") " Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.139356 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b4ca074-b020-4361-8308-f09a09c1bcff" (UID: "9b4ca074-b020-4361-8308-f09a09c1bcff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.235877 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b4ca074-b020-4361-8308-f09a09c1bcff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.433578 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.442869 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.455805 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 17:22:40 crc kubenswrapper[4760]: E1124 17:22:40.456249 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b4ca074-b020-4361-8308-f09a09c1bcff" containerName="nova-cell1-novncproxy-novncproxy" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.456270 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b4ca074-b020-4361-8308-f09a09c1bcff" containerName="nova-cell1-novncproxy-novncproxy" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.456451 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b4ca074-b020-4361-8308-f09a09c1bcff" containerName="nova-cell1-novncproxy-novncproxy" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.457058 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.459900 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.459989 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.460178 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.464760 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.517704 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7af96892-b69b-4456-8634-741ec91cda2e","Type":"ContainerStarted","Data":"b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536"} Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.643975 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.644127 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.644186 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.644208 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5ldz\" (UniqueName: \"kubernetes.io/projected/d00b658b-a227-4c0c-9f91-d1c09d5f6173-kube-api-access-w5ldz\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.644526 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.745490 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.745539 4760 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.745560 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5ldz\" (UniqueName: \"kubernetes.io/projected/d00b658b-a227-4c0c-9f91-d1c09d5f6173-kube-api-access-w5ldz\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.745628 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.745709 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.751967 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.752976 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.754896 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.762716 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d00b658b-a227-4c0c-9f91-d1c09d5f6173-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.763066 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5ldz\" (UniqueName: \"kubernetes.io/projected/d00b658b-a227-4c0c-9f91-d1c09d5f6173-kube-api-access-w5ldz\") pod \"nova-cell1-novncproxy-0\" (UID: \"d00b658b-a227-4c0c-9f91-d1c09d5f6173\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:40 crc kubenswrapper[4760]: I1124 17:22:40.784925 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.275650 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 17:22:41 crc kubenswrapper[4760]: W1124 17:22:41.280670 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd00b658b_a227_4c0c_9f91_d1c09d5f6173.slice/crio-c39500b237e94cabea597585bdf0a14a5f273a5c0c785b6509a7479a555f9d92 WatchSource:0}: Error finding container c39500b237e94cabea597585bdf0a14a5f273a5c0c785b6509a7479a555f9d92: Status 404 returned error can't find the container with id c39500b237e94cabea597585bdf0a14a5f273a5c0c785b6509a7479a555f9d92 Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.477072 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b4ca074-b020-4361-8308-f09a09c1bcff" path="/var/lib/kubelet/pods/9b4ca074-b020-4361-8308-f09a09c1bcff/volumes" Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.531509 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7af96892-b69b-4456-8634-741ec91cda2e","Type":"ContainerStarted","Data":"7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac"} Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.531664 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.532995 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d00b658b-a227-4c0c-9f91-d1c09d5f6173","Type":"ContainerStarted","Data":"cb5c03d81520f41f8da3b68edece84a073499a4c2c87385cbb2770a6f1c85e8b"} Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.533040 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d00b658b-a227-4c0c-9f91-d1c09d5f6173","Type":"ContainerStarted","Data":"c39500b237e94cabea597585bdf0a14a5f273a5c0c785b6509a7479a555f9d92"} Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.570877 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.005861811 podStartE2EDuration="5.570860721s" podCreationTimestamp="2025-11-24 17:22:36 +0000 UTC" firstStartedPulling="2025-11-24 17:22:37.358625106 +0000 UTC m=+1152.681506656" lastFinishedPulling="2025-11-24 17:22:40.923624016 +0000 UTC m=+1156.246505566" observedRunningTime="2025-11-24 17:22:41.56870613 +0000 UTC m=+1156.891587690" watchObservedRunningTime="2025-11-24 17:22:41.570860721 +0000 UTC m=+1156.893742271" Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.600190 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.600174871 podStartE2EDuration="1.600174871s" podCreationTimestamp="2025-11-24 17:22:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:41.593792868 +0000 UTC m=+1156.916674418" watchObservedRunningTime="2025-11-24 17:22:41.600174871 +0000 UTC m=+1156.923056421" Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.685171 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.685908 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack/nova-api-0" Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.686333 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.690099 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 24 17:22:41 crc kubenswrapper[4760]: I1124 17:22:41.798276 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.542591 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.545948 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.772784 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-xr5zm"] Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.774323 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.793392 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-xr5zm"] Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.898187 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.898271 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-config\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.898303 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z4x8\" (UniqueName: \"kubernetes.io/projected/6e587246-348f-4c91-96ce-88dd0beafac8-kube-api-access-7z4x8\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.898563 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.898607 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:42 crc kubenswrapper[4760]: I1124 17:22:42.899031 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.000413 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.000460 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.000508 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.000538 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.000566 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-config\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.000583 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z4x8\" (UniqueName: \"kubernetes.io/projected/6e587246-348f-4c91-96ce-88dd0beafac8-kube-api-access-7z4x8\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.002224 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.002527 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.002987 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-nb\") 
pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.004292 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.004468 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-config\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.021791 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z4x8\" (UniqueName: \"kubernetes.io/projected/6e587246-348f-4c91-96ce-88dd0beafac8-kube-api-access-7z4x8\") pod \"dnsmasq-dns-89c5cd4d5-xr5zm\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") " pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.098619 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:43 crc kubenswrapper[4760]: I1124 17:22:43.587446 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-xr5zm"] Nov 24 17:22:44 crc kubenswrapper[4760]: I1124 17:22:44.569917 4760 generic.go:334] "Generic (PLEG): container finished" podID="6e587246-348f-4c91-96ce-88dd0beafac8" containerID="88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f" exitCode=0 Nov 24 17:22:44 crc kubenswrapper[4760]: I1124 17:22:44.569979 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" event={"ID":"6e587246-348f-4c91-96ce-88dd0beafac8","Type":"ContainerDied","Data":"88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f"} Nov 24 17:22:44 crc kubenswrapper[4760]: I1124 17:22:44.570360 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" event={"ID":"6e587246-348f-4c91-96ce-88dd0beafac8","Type":"ContainerStarted","Data":"7e2c27049b307dd16a8a5fc61a0430c0cf7d0ea6d775fba40659dcb29bfe4f92"} Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.034545 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.038462 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="sg-core" containerID="cri-o://b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536" gracePeriod=30 Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.038511 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="proxy-httpd" containerID="cri-o://7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac" gracePeriod=30 Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.038470 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7af96892-b69b-4456-8634-741ec91cda2e" 
containerName="ceilometer-notification-agent" containerID="cri-o://6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258" gracePeriod=30 Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.038717 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="ceilometer-central-agent" containerID="cri-o://72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6" gracePeriod=30 Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.171922 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:45 crc kubenswrapper[4760]: E1124 17:22:45.284440 4760 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7af96892_b69b_4456_8634_741ec91cda2e.slice/crio-7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7af96892_b69b_4456_8634_741ec91cda2e.slice/crio-conmon-b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7af96892_b69b_4456_8634_741ec91cda2e.slice/crio-conmon-7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac.scope\": RecentStats: unable to find data in memory cache]" Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.580987 4760 generic.go:334] "Generic (PLEG): container finished" podID="7af96892-b69b-4456-8634-741ec91cda2e" containerID="7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac" exitCode=0 Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.581342 4760 generic.go:334] "Generic (PLEG): container finished" podID="7af96892-b69b-4456-8634-741ec91cda2e" containerID="b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536" exitCode=2 Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.581355 4760 generic.go:334] "Generic (PLEG): container finished" podID="7af96892-b69b-4456-8634-741ec91cda2e" containerID="6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258" exitCode=0 Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.581402 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7af96892-b69b-4456-8634-741ec91cda2e","Type":"ContainerDied","Data":"7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac"} Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.581434 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7af96892-b69b-4456-8634-741ec91cda2e","Type":"ContainerDied","Data":"b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536"} Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.581450 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7af96892-b69b-4456-8634-741ec91cda2e","Type":"ContainerDied","Data":"6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258"} Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.583729 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerName="nova-api-log" containerID="cri-o://5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55" 
gracePeriod=30 Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.584903 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" event={"ID":"6e587246-348f-4c91-96ce-88dd0beafac8","Type":"ContainerStarted","Data":"e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde"} Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.584943 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.585320 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerName="nova-api-api" containerID="cri-o://bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952" gracePeriod=30 Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.614171 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" podStartSLOduration=3.614148596 podStartE2EDuration="3.614148596s" podCreationTimestamp="2025-11-24 17:22:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:45.608828744 +0000 UTC m=+1160.931710314" watchObservedRunningTime="2025-11-24 17:22:45.614148596 +0000 UTC m=+1160.937030146" Nov 24 17:22:45 crc kubenswrapper[4760]: I1124 17:22:45.787271 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:46 crc kubenswrapper[4760]: I1124 17:22:46.596141 4760 generic.go:334] "Generic (PLEG): container finished" podID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerID="5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55" exitCode=143 Nov 24 17:22:46 crc kubenswrapper[4760]: I1124 17:22:46.596139 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"036d6371-67d8-404b-94dc-9d001f0ba6d5","Type":"ContainerDied","Data":"5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55"} Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.107529 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.283106 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-config-data\") pod \"7af96892-b69b-4456-8634-741ec91cda2e\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.283207 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-run-httpd\") pod \"7af96892-b69b-4456-8634-741ec91cda2e\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.283293 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-log-httpd\") pod \"7af96892-b69b-4456-8634-741ec91cda2e\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.283453 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-combined-ca-bundle\") pod \"7af96892-b69b-4456-8634-741ec91cda2e\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.283543 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-sg-core-conf-yaml\") pod \"7af96892-b69b-4456-8634-741ec91cda2e\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.283597 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-scripts\") pod \"7af96892-b69b-4456-8634-741ec91cda2e\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.283601 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7af96892-b69b-4456-8634-741ec91cda2e" (UID: "7af96892-b69b-4456-8634-741ec91cda2e"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.283684 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdm8v\" (UniqueName: \"kubernetes.io/projected/7af96892-b69b-4456-8634-741ec91cda2e-kube-api-access-qdm8v\") pod \"7af96892-b69b-4456-8634-741ec91cda2e\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.283733 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-ceilometer-tls-certs\") pod \"7af96892-b69b-4456-8634-741ec91cda2e\" (UID: \"7af96892-b69b-4456-8634-741ec91cda2e\") " Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.283769 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7af96892-b69b-4456-8634-741ec91cda2e" (UID: "7af96892-b69b-4456-8634-741ec91cda2e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.284667 4760 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.284702 4760 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7af96892-b69b-4456-8634-741ec91cda2e-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.288544 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7af96892-b69b-4456-8634-741ec91cda2e-kube-api-access-qdm8v" (OuterVolumeSpecName: "kube-api-access-qdm8v") pod "7af96892-b69b-4456-8634-741ec91cda2e" (UID: "7af96892-b69b-4456-8634-741ec91cda2e"). InnerVolumeSpecName "kube-api-access-qdm8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.292104 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-scripts" (OuterVolumeSpecName: "scripts") pod "7af96892-b69b-4456-8634-741ec91cda2e" (UID: "7af96892-b69b-4456-8634-741ec91cda2e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.323746 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7af96892-b69b-4456-8634-741ec91cda2e" (UID: "7af96892-b69b-4456-8634-741ec91cda2e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.347719 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "7af96892-b69b-4456-8634-741ec91cda2e" (UID: "7af96892-b69b-4456-8634-741ec91cda2e"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.381389 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-config-data" (OuterVolumeSpecName: "config-data") pod "7af96892-b69b-4456-8634-741ec91cda2e" (UID: "7af96892-b69b-4456-8634-741ec91cda2e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.387502 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.387535 4760 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.387547 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.387558 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdm8v\" (UniqueName: \"kubernetes.io/projected/7af96892-b69b-4456-8634-741ec91cda2e-kube-api-access-qdm8v\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.387602 4760 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.388115 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7af96892-b69b-4456-8634-741ec91cda2e" (UID: "7af96892-b69b-4456-8634-741ec91cda2e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.489766 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7af96892-b69b-4456-8634-741ec91cda2e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.609410 4760 generic.go:334] "Generic (PLEG): container finished" podID="7af96892-b69b-4456-8634-741ec91cda2e" containerID="72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6" exitCode=0 Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.609454 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7af96892-b69b-4456-8634-741ec91cda2e","Type":"ContainerDied","Data":"72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6"} Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.609462 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.609480 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7af96892-b69b-4456-8634-741ec91cda2e","Type":"ContainerDied","Data":"093c25a5bbb70ab58a7de610a696ada4a3f9849877e425ebd0c7594c3ba4ff0b"} Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.609497 4760 scope.go:117] "RemoveContainer" containerID="7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.637912 4760 scope.go:117] "RemoveContainer" containerID="b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.646792 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.654780 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.668630 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:47 crc kubenswrapper[4760]: E1124 17:22:47.668992 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="ceilometer-notification-agent" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.669024 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="ceilometer-notification-agent" Nov 24 17:22:47 crc kubenswrapper[4760]: E1124 17:22:47.669051 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="proxy-httpd" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.669058 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="proxy-httpd" Nov 24 17:22:47 crc kubenswrapper[4760]: E1124 17:22:47.669081 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="sg-core" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.669087 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="sg-core" Nov 24 17:22:47 crc kubenswrapper[4760]: E1124 17:22:47.669098 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="ceilometer-central-agent" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.669104 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="ceilometer-central-agent" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.669287 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="ceilometer-central-agent" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.669302 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="ceilometer-notification-agent" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.669317 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="7af96892-b69b-4456-8634-741ec91cda2e" containerName="sg-core" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.669330 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="7af96892-b69b-4456-8634-741ec91cda2e" 
containerName="proxy-httpd" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.675958 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.679181 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.679606 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.679643 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.682461 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.700423 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.700472 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.700494 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0aa23f2e-1d46-4435-abc2-e019f2070509-log-httpd\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.700565 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0aa23f2e-1d46-4435-abc2-e019f2070509-run-httpd\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.700614 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-config-data\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.700630 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-scripts\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.700658 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82tm8\" (UniqueName: \"kubernetes.io/projected/0aa23f2e-1d46-4435-abc2-e019f2070509-kube-api-access-82tm8\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.700714 4760 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.726572 4760 scope.go:117] "RemoveContainer" containerID="6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.747133 4760 scope.go:117] "RemoveContainer" containerID="72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.767832 4760 scope.go:117] "RemoveContainer" containerID="7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac" Nov 24 17:22:47 crc kubenswrapper[4760]: E1124 17:22:47.768369 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac\": container with ID starting with 7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac not found: ID does not exist" containerID="7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.768413 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac"} err="failed to get container status \"7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac\": rpc error: code = NotFound desc = could not find container \"7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac\": container with ID starting with 7610a853705ef116be0b81ea8bd31c20d0e8d63cb46bb7f0f2e9c975868948ac not found: ID does not exist" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.768447 4760 scope.go:117] "RemoveContainer" containerID="b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536" Nov 24 17:22:47 crc kubenswrapper[4760]: E1124 17:22:47.768816 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536\": container with ID starting with b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536 not found: ID does not exist" containerID="b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.768857 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536"} err="failed to get container status \"b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536\": rpc error: code = NotFound desc = could not find container \"b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536\": container with ID starting with b5d707a717f9bd4c57a38723634153561983e062ab4041f35936db8f3be23536 not found: ID does not exist" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.768886 4760 scope.go:117] "RemoveContainer" containerID="6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258" Nov 24 17:22:47 crc kubenswrapper[4760]: E1124 17:22:47.769279 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258\": container with ID starting with 6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258 not found: ID does not exist" containerID="6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.769303 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258"} err="failed to get container status \"6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258\": rpc error: code = NotFound desc = could not find container \"6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258\": container with ID starting with 6f18ba70c1bbeb6222ba8bf0a42921143b01dfb7f6d333759f6ccd0a709c3258 not found: ID does not exist" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.769321 4760 scope.go:117] "RemoveContainer" containerID="72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6" Nov 24 17:22:47 crc kubenswrapper[4760]: E1124 17:22:47.769557 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6\": container with ID starting with 72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6 not found: ID does not exist" containerID="72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.769578 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6"} err="failed to get container status \"72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6\": rpc error: code = NotFound desc = could not find container \"72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6\": container with ID starting with 72c0f01758f09c6de1678fdac11ef22738a830ef5b24a0eebf3ef6ed2e5f8cd6 not found: ID does not exist" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.801829 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82tm8\" (UniqueName: \"kubernetes.io/projected/0aa23f2e-1d46-4435-abc2-e019f2070509-kube-api-access-82tm8\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.801934 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.802149 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.802193 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " 
pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.802223 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0aa23f2e-1d46-4435-abc2-e019f2070509-log-httpd\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.802319 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0aa23f2e-1d46-4435-abc2-e019f2070509-run-httpd\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.802399 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-config-data\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.802425 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-scripts\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.802861 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0aa23f2e-1d46-4435-abc2-e019f2070509-log-httpd\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.803223 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0aa23f2e-1d46-4435-abc2-e019f2070509-run-httpd\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.807572 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.807575 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.808090 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-scripts\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.808411 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.808938 4760 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0aa23f2e-1d46-4435-abc2-e019f2070509-config-data\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:47 crc kubenswrapper[4760]: I1124 17:22:47.818300 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82tm8\" (UniqueName: \"kubernetes.io/projected/0aa23f2e-1d46-4435-abc2-e019f2070509-kube-api-access-82tm8\") pod \"ceilometer-0\" (UID: \"0aa23f2e-1d46-4435-abc2-e019f2070509\") " pod="openstack/ceilometer-0" Nov 24 17:22:48 crc kubenswrapper[4760]: I1124 17:22:48.024091 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 17:22:48 crc kubenswrapper[4760]: I1124 17:22:48.549229 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 17:22:48 crc kubenswrapper[4760]: W1124 17:22:48.554033 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0aa23f2e_1d46_4435_abc2_e019f2070509.slice/crio-b0bf36a5f77c7f52a138f3e219104131590166c0204bc6747c49a7a61cf0c551 WatchSource:0}: Error finding container b0bf36a5f77c7f52a138f3e219104131590166c0204bc6747c49a7a61cf0c551: Status 404 returned error can't find the container with id b0bf36a5f77c7f52a138f3e219104131590166c0204bc6747c49a7a61cf0c551 Nov 24 17:22:48 crc kubenswrapper[4760]: I1124 17:22:48.627653 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0aa23f2e-1d46-4435-abc2-e019f2070509","Type":"ContainerStarted","Data":"b0bf36a5f77c7f52a138f3e219104131590166c0204bc6747c49a7a61cf0c551"} Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.211810 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.338674 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-config-data\") pod \"036d6371-67d8-404b-94dc-9d001f0ba6d5\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.338802 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-combined-ca-bundle\") pod \"036d6371-67d8-404b-94dc-9d001f0ba6d5\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.338878 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kklmp\" (UniqueName: \"kubernetes.io/projected/036d6371-67d8-404b-94dc-9d001f0ba6d5-kube-api-access-kklmp\") pod \"036d6371-67d8-404b-94dc-9d001f0ba6d5\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.339129 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/036d6371-67d8-404b-94dc-9d001f0ba6d5-logs\") pod \"036d6371-67d8-404b-94dc-9d001f0ba6d5\" (UID: \"036d6371-67d8-404b-94dc-9d001f0ba6d5\") " Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.340252 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/036d6371-67d8-404b-94dc-9d001f0ba6d5-logs" (OuterVolumeSpecName: "logs") pod "036d6371-67d8-404b-94dc-9d001f0ba6d5" (UID: "036d6371-67d8-404b-94dc-9d001f0ba6d5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.346706 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/036d6371-67d8-404b-94dc-9d001f0ba6d5-kube-api-access-kklmp" (OuterVolumeSpecName: "kube-api-access-kklmp") pod "036d6371-67d8-404b-94dc-9d001f0ba6d5" (UID: "036d6371-67d8-404b-94dc-9d001f0ba6d5"). InnerVolumeSpecName "kube-api-access-kklmp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.374158 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-config-data" (OuterVolumeSpecName: "config-data") pod "036d6371-67d8-404b-94dc-9d001f0ba6d5" (UID: "036d6371-67d8-404b-94dc-9d001f0ba6d5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.384469 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "036d6371-67d8-404b-94dc-9d001f0ba6d5" (UID: "036d6371-67d8-404b-94dc-9d001f0ba6d5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.441945 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.441988 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036d6371-67d8-404b-94dc-9d001f0ba6d5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.442039 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kklmp\" (UniqueName: \"kubernetes.io/projected/036d6371-67d8-404b-94dc-9d001f0ba6d5-kube-api-access-kklmp\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.442053 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/036d6371-67d8-404b-94dc-9d001f0ba6d5-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.479629 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7af96892-b69b-4456-8634-741ec91cda2e" path="/var/lib/kubelet/pods/7af96892-b69b-4456-8634-741ec91cda2e/volumes" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.640887 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0aa23f2e-1d46-4435-abc2-e019f2070509","Type":"ContainerStarted","Data":"7484d956c5842c562309e5051bab40258fe35c7490324ff247b889040ae29b9b"} Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.643949 4760 generic.go:334] "Generic (PLEG): container finished" podID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerID="bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952" exitCode=0 Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.644182 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.644203 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"036d6371-67d8-404b-94dc-9d001f0ba6d5","Type":"ContainerDied","Data":"bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952"} Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.644241 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"036d6371-67d8-404b-94dc-9d001f0ba6d5","Type":"ContainerDied","Data":"1a6bb1e8a28ee6033f4aaa77961bb8e9362b56618b1cdaf015108b67cf7649dd"} Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.644271 4760 scope.go:117] "RemoveContainer" containerID="bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.721189 4760 scope.go:117] "RemoveContainer" containerID="5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.745113 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.772721 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.774471 4760 scope.go:117] "RemoveContainer" containerID="bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952" Nov 24 17:22:49 crc kubenswrapper[4760]: E1124 17:22:49.774959 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952\": container with ID starting with bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952 not found: ID does not exist" containerID="bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.775000 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952"} err="failed to get container status \"bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952\": rpc error: code = NotFound desc = could not find container \"bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952\": container with ID starting with bd207826287e1ae56073d1e79c41ed0ab5416ed3426b6727b3fbd10be30b5952 not found: ID does not exist" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.775121 4760 scope.go:117] "RemoveContainer" containerID="5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55" Nov 24 17:22:49 crc kubenswrapper[4760]: E1124 17:22:49.775565 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55\": container with ID starting with 5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55 not found: ID does not exist" containerID="5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.775604 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55"} err="failed to get container status \"5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55\": rpc error: code = NotFound desc = could not 
find container \"5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55\": container with ID starting with 5db2686f912e641342f2a5b739eabbc0b31309443089295fb817761ef49f2a55 not found: ID does not exist" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.780316 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:49 crc kubenswrapper[4760]: E1124 17:22:49.780736 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerName="nova-api-log" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.780753 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerName="nova-api-log" Nov 24 17:22:49 crc kubenswrapper[4760]: E1124 17:22:49.780772 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerName="nova-api-api" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.780780 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerName="nova-api-api" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.780946 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerName="nova-api-api" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.780966 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" containerName="nova-api-log" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.781937 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.785318 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.785553 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.785650 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.785728 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.849209 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.849288 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-public-tls-certs\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.849380 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.849436 4760 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd90484c-5dee-4a46-ad7c-60c82cf285e4-logs\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.849463 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fklg\" (UniqueName: \"kubernetes.io/projected/cd90484c-5dee-4a46-ad7c-60c82cf285e4-kube-api-access-7fklg\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.849591 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-config-data\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.951049 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.951118 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-public-tls-certs\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.951161 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.951185 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd90484c-5dee-4a46-ad7c-60c82cf285e4-logs\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.951207 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fklg\" (UniqueName: \"kubernetes.io/projected/cd90484c-5dee-4a46-ad7c-60c82cf285e4-kube-api-access-7fklg\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.951257 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-config-data\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.951679 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd90484c-5dee-4a46-ad7c-60c82cf285e4-logs\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.955904 4760 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.956100 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-config-data\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.958507 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.962596 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-public-tls-certs\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:49 crc kubenswrapper[4760]: I1124 17:22:49.969709 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fklg\" (UniqueName: \"kubernetes.io/projected/cd90484c-5dee-4a46-ad7c-60c82cf285e4-kube-api-access-7fklg\") pod \"nova-api-0\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " pod="openstack/nova-api-0" Nov 24 17:22:50 crc kubenswrapper[4760]: I1124 17:22:50.107949 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:22:50 crc kubenswrapper[4760]: I1124 17:22:50.553488 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:50 crc kubenswrapper[4760]: W1124 17:22:50.562554 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd90484c_5dee_4a46_ad7c_60c82cf285e4.slice/crio-fd8655d98e51dbe8a73108335f5ad559692f5f4df1d9a496407e54daf34cf783 WatchSource:0}: Error finding container fd8655d98e51dbe8a73108335f5ad559692f5f4df1d9a496407e54daf34cf783: Status 404 returned error can't find the container with id fd8655d98e51dbe8a73108335f5ad559692f5f4df1d9a496407e54daf34cf783 Nov 24 17:22:50 crc kubenswrapper[4760]: I1124 17:22:50.657221 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd90484c-5dee-4a46-ad7c-60c82cf285e4","Type":"ContainerStarted","Data":"fd8655d98e51dbe8a73108335f5ad559692f5f4df1d9a496407e54daf34cf783"} Nov 24 17:22:50 crc kubenswrapper[4760]: I1124 17:22:50.660153 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0aa23f2e-1d46-4435-abc2-e019f2070509","Type":"ContainerStarted","Data":"cb2147fe46b0b98ff60dddd561eec8cff6bf4a6ab6eb1395537b76297331a5fd"} Nov 24 17:22:50 crc kubenswrapper[4760]: I1124 17:22:50.788109 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:50 crc kubenswrapper[4760]: I1124 17:22:50.815320 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.476932 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="036d6371-67d8-404b-94dc-9d001f0ba6d5" path="/var/lib/kubelet/pods/036d6371-67d8-404b-94dc-9d001f0ba6d5/volumes" Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.677978 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0aa23f2e-1d46-4435-abc2-e019f2070509","Type":"ContainerStarted","Data":"4fc4ecc23b7afd36f9580f4e877bee2d7a304c497d9bc3fffebe2ab7a9f585d6"} Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.680867 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd90484c-5dee-4a46-ad7c-60c82cf285e4","Type":"ContainerStarted","Data":"ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f"} Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.680932 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd90484c-5dee-4a46-ad7c-60c82cf285e4","Type":"ContainerStarted","Data":"86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0"} Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.709982 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.709957404 podStartE2EDuration="2.709957404s" podCreationTimestamp="2025-11-24 17:22:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:51.703582131 +0000 UTC m=+1167.026463681" watchObservedRunningTime="2025-11-24 17:22:51.709957404 +0000 UTC m=+1167.032838964" Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.711076 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/nova-cell1-novncproxy-0" Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.879332 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-k26pp"] Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.881373 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.883384 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.883623 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.891110 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-k26pp"] Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.990100 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.990273 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-config-data\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.990398 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6j7sq\" (UniqueName: \"kubernetes.io/projected/332e7b13-fb92-4803-a09a-1d3368fa74a0-kube-api-access-6j7sq\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:51 crc kubenswrapper[4760]: I1124 17:22:51.990463 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-scripts\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.091905 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-scripts\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.092060 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.092134 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-config-data\") pod 
\"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.092174 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6j7sq\" (UniqueName: \"kubernetes.io/projected/332e7b13-fb92-4803-a09a-1d3368fa74a0-kube-api-access-6j7sq\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.097349 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-scripts\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.097573 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.107892 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-config-data\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.113778 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6j7sq\" (UniqueName: \"kubernetes.io/projected/332e7b13-fb92-4803-a09a-1d3368fa74a0-kube-api-access-6j7sq\") pod \"nova-cell1-cell-mapping-k26pp\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.249179 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.694890 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0aa23f2e-1d46-4435-abc2-e019f2070509","Type":"ContainerStarted","Data":"0abcba0a5bee85d7721df558590eb8c1ff0c9448695cb9d3ec4299f917272b64"} Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.695578 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.728512 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.193536828 podStartE2EDuration="5.728488077s" podCreationTimestamp="2025-11-24 17:22:47 +0000 UTC" firstStartedPulling="2025-11-24 17:22:48.556447444 +0000 UTC m=+1163.879328994" lastFinishedPulling="2025-11-24 17:22:52.091398693 +0000 UTC m=+1167.414280243" observedRunningTime="2025-11-24 17:22:52.715417623 +0000 UTC m=+1168.038299173" watchObservedRunningTime="2025-11-24 17:22:52.728488077 +0000 UTC m=+1168.051369627" Nov 24 17:22:52 crc kubenswrapper[4760]: I1124 17:22:52.758654 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-k26pp"] Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.100078 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.172915 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-d5dth"] Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.173146 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" podUID="8b292f44-3605-46d8-950d-39454e65e258" containerName="dnsmasq-dns" containerID="cri-o://88993599c4082243864390714cd9c961b2aa31184a1c1fbb31bedb247f4a2179" gracePeriod=10 Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.705536 4760 generic.go:334] "Generic (PLEG): container finished" podID="8b292f44-3605-46d8-950d-39454e65e258" containerID="88993599c4082243864390714cd9c961b2aa31184a1c1fbb31bedb247f4a2179" exitCode=0 Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.705575 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" event={"ID":"8b292f44-3605-46d8-950d-39454e65e258","Type":"ContainerDied","Data":"88993599c4082243864390714cd9c961b2aa31184a1c1fbb31bedb247f4a2179"} Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.705923 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" event={"ID":"8b292f44-3605-46d8-950d-39454e65e258","Type":"ContainerDied","Data":"2e2e8595778809b3eab741379b83bcc79451b77e457cb607dd8ba42ebfea7096"} Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.705940 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e2e8595778809b3eab741379b83bcc79451b77e457cb607dd8ba42ebfea7096" Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.709333 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-k26pp" event={"ID":"332e7b13-fb92-4803-a09a-1d3368fa74a0","Type":"ContainerStarted","Data":"aea710e3b7d41f64eb2e4539c73e5baff1889513d0c522e32571b42dbe5f9ae8"} Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.709383 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-cell-mapping-k26pp" event={"ID":"332e7b13-fb92-4803-a09a-1d3368fa74a0","Type":"ContainerStarted","Data":"f1ad706d7ee6b8c75c176bbd089308935e5cf324a80f6d0cb4b50519fe63e571"} Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.728934 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-k26pp" podStartSLOduration=2.728915323 podStartE2EDuration="2.728915323s" podCreationTimestamp="2025-11-24 17:22:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:22:53.726822163 +0000 UTC m=+1169.049703713" watchObservedRunningTime="2025-11-24 17:22:53.728915323 +0000 UTC m=+1169.051796883" Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.738775 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.925912 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-swift-storage-0\") pod \"8b292f44-3605-46d8-950d-39454e65e258\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.925989 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swv6n\" (UniqueName: \"kubernetes.io/projected/8b292f44-3605-46d8-950d-39454e65e258-kube-api-access-swv6n\") pod \"8b292f44-3605-46d8-950d-39454e65e258\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.926222 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-config\") pod \"8b292f44-3605-46d8-950d-39454e65e258\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.926465 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-sb\") pod \"8b292f44-3605-46d8-950d-39454e65e258\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.926521 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-nb\") pod \"8b292f44-3605-46d8-950d-39454e65e258\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.926601 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-svc\") pod \"8b292f44-3605-46d8-950d-39454e65e258\" (UID: \"8b292f44-3605-46d8-950d-39454e65e258\") " Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.952834 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b292f44-3605-46d8-950d-39454e65e258-kube-api-access-swv6n" (OuterVolumeSpecName: "kube-api-access-swv6n") pod "8b292f44-3605-46d8-950d-39454e65e258" (UID: "8b292f44-3605-46d8-950d-39454e65e258"). InnerVolumeSpecName "kube-api-access-swv6n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.985342 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8b292f44-3605-46d8-950d-39454e65e258" (UID: "8b292f44-3605-46d8-950d-39454e65e258"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.992784 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8b292f44-3605-46d8-950d-39454e65e258" (UID: "8b292f44-3605-46d8-950d-39454e65e258"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:22:53 crc kubenswrapper[4760]: I1124 17:22:53.994490 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8b292f44-3605-46d8-950d-39454e65e258" (UID: "8b292f44-3605-46d8-950d-39454e65e258"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.001550 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8b292f44-3605-46d8-950d-39454e65e258" (UID: "8b292f44-3605-46d8-950d-39454e65e258"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.014302 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-config" (OuterVolumeSpecName: "config") pod "8b292f44-3605-46d8-950d-39454e65e258" (UID: "8b292f44-3605-46d8-950d-39454e65e258"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.029263 4760 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.029527 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swv6n\" (UniqueName: \"kubernetes.io/projected/8b292f44-3605-46d8-950d-39454e65e258-kube-api-access-swv6n\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.029663 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.029766 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.029864 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.029950 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b292f44-3605-46d8-950d-39454e65e258-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.719988 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.777828 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-d5dth"] Nov 24 17:22:54 crc kubenswrapper[4760]: I1124 17:22:54.787837 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-d5dth"] Nov 24 17:22:55 crc kubenswrapper[4760]: I1124 17:22:55.475620 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b292f44-3605-46d8-950d-39454e65e258" path="/var/lib/kubelet/pods/8b292f44-3605-46d8-950d-39454e65e258/volumes" Nov 24 17:22:57 crc kubenswrapper[4760]: I1124 17:22:57.750693 4760 generic.go:334] "Generic (PLEG): container finished" podID="332e7b13-fb92-4803-a09a-1d3368fa74a0" containerID="aea710e3b7d41f64eb2e4539c73e5baff1889513d0c522e32571b42dbe5f9ae8" exitCode=0 Nov 24 17:22:57 crc kubenswrapper[4760]: I1124 17:22:57.750746 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-k26pp" event={"ID":"332e7b13-fb92-4803-a09a-1d3368fa74a0","Type":"ContainerDied","Data":"aea710e3b7d41f64eb2e4539c73e5baff1889513d0c522e32571b42dbe5f9ae8"} Nov 24 17:22:58 crc kubenswrapper[4760]: I1124 17:22:58.697514 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-757b4f8459-d5dth" podUID="8b292f44-3605-46d8-950d-39454e65e258" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.190:5353: i/o timeout" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.217596 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.341760 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6j7sq\" (UniqueName: \"kubernetes.io/projected/332e7b13-fb92-4803-a09a-1d3368fa74a0-kube-api-access-6j7sq\") pod \"332e7b13-fb92-4803-a09a-1d3368fa74a0\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.341840 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-config-data\") pod \"332e7b13-fb92-4803-a09a-1d3368fa74a0\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.341898 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-scripts\") pod \"332e7b13-fb92-4803-a09a-1d3368fa74a0\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.341925 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-combined-ca-bundle\") pod \"332e7b13-fb92-4803-a09a-1d3368fa74a0\" (UID: \"332e7b13-fb92-4803-a09a-1d3368fa74a0\") " Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.349130 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/332e7b13-fb92-4803-a09a-1d3368fa74a0-kube-api-access-6j7sq" (OuterVolumeSpecName: "kube-api-access-6j7sq") pod "332e7b13-fb92-4803-a09a-1d3368fa74a0" (UID: "332e7b13-fb92-4803-a09a-1d3368fa74a0"). InnerVolumeSpecName "kube-api-access-6j7sq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.349945 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-scripts" (OuterVolumeSpecName: "scripts") pod "332e7b13-fb92-4803-a09a-1d3368fa74a0" (UID: "332e7b13-fb92-4803-a09a-1d3368fa74a0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.376456 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-config-data" (OuterVolumeSpecName: "config-data") pod "332e7b13-fb92-4803-a09a-1d3368fa74a0" (UID: "332e7b13-fb92-4803-a09a-1d3368fa74a0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.377763 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "332e7b13-fb92-4803-a09a-1d3368fa74a0" (UID: "332e7b13-fb92-4803-a09a-1d3368fa74a0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.444500 4760 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.444546 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.444561 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6j7sq\" (UniqueName: \"kubernetes.io/projected/332e7b13-fb92-4803-a09a-1d3368fa74a0-kube-api-access-6j7sq\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.444573 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332e7b13-fb92-4803-a09a-1d3368fa74a0-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.773719 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-k26pp" event={"ID":"332e7b13-fb92-4803-a09a-1d3368fa74a0","Type":"ContainerDied","Data":"f1ad706d7ee6b8c75c176bbd089308935e5cf324a80f6d0cb4b50519fe63e571"} Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.773767 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1ad706d7ee6b8c75c176bbd089308935e5cf324a80f6d0cb4b50519fe63e571" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.773767 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-k26pp" Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.969490 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.970019 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="57ccb0e4-90b5-4029-b30f-4eb9973fa389" containerName="nova-scheduler-scheduler" containerID="cri-o://4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584" gracePeriod=30 Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.986519 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.986807 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" containerName="nova-api-log" containerID="cri-o://86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0" gracePeriod=30 Nov 24 17:22:59 crc kubenswrapper[4760]: I1124 17:22:59.987049 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" containerName="nova-api-api" containerID="cri-o://ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f" gracePeriod=30 Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.009732 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.009995 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" 
containerName="nova-metadata-log" containerID="cri-o://9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812" gracePeriod=30 Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.010154 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-metadata" containerID="cri-o://5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94" gracePeriod=30 Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.561599 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.666860 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-combined-ca-bundle\") pod \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.667227 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-config-data\") pod \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.667305 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-public-tls-certs\") pod \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.667343 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd90484c-5dee-4a46-ad7c-60c82cf285e4-logs\") pod \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.667377 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fklg\" (UniqueName: \"kubernetes.io/projected/cd90484c-5dee-4a46-ad7c-60c82cf285e4-kube-api-access-7fklg\") pod \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.667462 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-internal-tls-certs\") pod \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\" (UID: \"cd90484c-5dee-4a46-ad7c-60c82cf285e4\") " Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.668421 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd90484c-5dee-4a46-ad7c-60c82cf285e4-logs" (OuterVolumeSpecName: "logs") pod "cd90484c-5dee-4a46-ad7c-60c82cf285e4" (UID: "cd90484c-5dee-4a46-ad7c-60c82cf285e4"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.674942 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd90484c-5dee-4a46-ad7c-60c82cf285e4-kube-api-access-7fklg" (OuterVolumeSpecName: "kube-api-access-7fklg") pod "cd90484c-5dee-4a46-ad7c-60c82cf285e4" (UID: "cd90484c-5dee-4a46-ad7c-60c82cf285e4"). InnerVolumeSpecName "kube-api-access-7fklg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.702279 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-config-data" (OuterVolumeSpecName: "config-data") pod "cd90484c-5dee-4a46-ad7c-60c82cf285e4" (UID: "cd90484c-5dee-4a46-ad7c-60c82cf285e4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.702567 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd90484c-5dee-4a46-ad7c-60c82cf285e4" (UID: "cd90484c-5dee-4a46-ad7c-60c82cf285e4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.722728 4760 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.727421 4760 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.729392 4760 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.729486 4760 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="57ccb0e4-90b5-4029-b30f-4eb9973fa389" containerName="nova-scheduler-scheduler" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.736044 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cd90484c-5dee-4a46-ad7c-60c82cf285e4" (UID: "cd90484c-5dee-4a46-ad7c-60c82cf285e4"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.742044 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cd90484c-5dee-4a46-ad7c-60c82cf285e4" (UID: "cd90484c-5dee-4a46-ad7c-60c82cf285e4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.769414 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fklg\" (UniqueName: \"kubernetes.io/projected/cd90484c-5dee-4a46-ad7c-60c82cf285e4-kube-api-access-7fklg\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.769454 4760 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.769468 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.769480 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.769491 4760 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd90484c-5dee-4a46-ad7c-60c82cf285e4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.769502 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd90484c-5dee-4a46-ad7c-60c82cf285e4-logs\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.785387 4760 generic.go:334] "Generic (PLEG): container finished" podID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" containerID="ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f" exitCode=0 Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.785451 4760 generic.go:334] "Generic (PLEG): container finished" podID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" containerID="86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0" exitCode=143 Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.785458 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd90484c-5dee-4a46-ad7c-60c82cf285e4","Type":"ContainerDied","Data":"ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f"} Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.785488 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.785522 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd90484c-5dee-4a46-ad7c-60c82cf285e4","Type":"ContainerDied","Data":"86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0"} Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.785537 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd90484c-5dee-4a46-ad7c-60c82cf285e4","Type":"ContainerDied","Data":"fd8655d98e51dbe8a73108335f5ad559692f5f4df1d9a496407e54daf34cf783"} Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.785557 4760 scope.go:117] "RemoveContainer" containerID="ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.789849 4760 generic.go:334] "Generic (PLEG): container finished" podID="4b6228d3-58e5-48e3-b881-9381015853b3" containerID="9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812" exitCode=143 Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.789884 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b6228d3-58e5-48e3-b881-9381015853b3","Type":"ContainerDied","Data":"9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812"} Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.820252 4760 scope.go:117] "RemoveContainer" containerID="86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.833442 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.846983 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.854639 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.855101 4760 scope.go:117] "RemoveContainer" containerID="ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f" Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.855114 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b292f44-3605-46d8-950d-39454e65e258" containerName="dnsmasq-dns" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.855215 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b292f44-3605-46d8-950d-39454e65e258" containerName="dnsmasq-dns" Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.855267 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" containerName="nova-api-api" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.855277 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" containerName="nova-api-api" Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.855311 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" containerName="nova-api-log" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.855320 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" containerName="nova-api-log" Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.855342 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b292f44-3605-46d8-950d-39454e65e258" containerName="init" Nov 24 17:23:00 crc 
kubenswrapper[4760]: I1124 17:23:00.855350 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b292f44-3605-46d8-950d-39454e65e258" containerName="init" Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.855383 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="332e7b13-fb92-4803-a09a-1d3368fa74a0" containerName="nova-manage" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.855393 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="332e7b13-fb92-4803-a09a-1d3368fa74a0" containerName="nova-manage" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.855744 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" containerName="nova-api-api" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.855772 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" containerName="nova-api-log" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.855790 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b292f44-3605-46d8-950d-39454e65e258" containerName="dnsmasq-dns" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.855819 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="332e7b13-fb92-4803-a09a-1d3368fa74a0" containerName="nova-manage" Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.855995 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f\": container with ID starting with ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f not found: ID does not exist" containerID="ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.856063 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f"} err="failed to get container status \"ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f\": rpc error: code = NotFound desc = could not find container \"ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f\": container with ID starting with ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f not found: ID does not exist" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.856090 4760 scope.go:117] "RemoveContainer" containerID="86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0" Nov 24 17:23:00 crc kubenswrapper[4760]: E1124 17:23:00.856566 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0\": container with ID starting with 86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0 not found: ID does not exist" containerID="86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0" Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.856596 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0"} err="failed to get container status \"86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0\": rpc error: code = NotFound desc = could not find container \"86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0\": container with ID starting with 
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.856617 4760 scope.go:117] "RemoveContainer" containerID="ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.856954 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f"} err="failed to get container status \"ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f\": rpc error: code = NotFound desc = could not find container \"ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f\": container with ID starting with ae6b61f84c1dc096c8279d89f1ec9d1c0091dd2916875efa480daceffcd27e9f not found: ID does not exist"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.856972 4760 scope.go:117] "RemoveContainer" containerID="86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.857236 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.857298 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0"} err="failed to get container status \"86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0\": rpc error: code = NotFound desc = could not find container \"86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0\": container with ID starting with 86f213234659e7588dc1abf0177f795182bedea4ebe393e1e82d802febe048e0 not found: ID does not exist"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.859598 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.859744 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.860180 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.869186 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.979728 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.980082 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fcad265-b82c-400e-afce-ac2afac950d0-logs\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.980371 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7s7m\" (UniqueName: \"kubernetes.io/projected/6fcad265-b82c-400e-afce-ac2afac950d0-kube-api-access-w7s7m\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.980530 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.980990 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-public-tls-certs\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:00 crc kubenswrapper[4760]: I1124 17:23:00.981224 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-config-data\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.083615 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-public-tls-certs\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.083953 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-config-data\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.084161 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.084333 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fcad265-b82c-400e-afce-ac2afac950d0-logs\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.084531 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7s7m\" (UniqueName: \"kubernetes.io/projected/6fcad265-b82c-400e-afce-ac2afac950d0-kube-api-access-w7s7m\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.084732 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.084737 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fcad265-b82c-400e-afce-ac2afac950d0-logs\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0"
pod="openstack/nova-api-0" Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.088079 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0" Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.088234 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-public-tls-certs\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0" Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.089108 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0" Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.089723 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fcad265-b82c-400e-afce-ac2afac950d0-config-data\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0" Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.106541 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7s7m\" (UniqueName: \"kubernetes.io/projected/6fcad265-b82c-400e-afce-ac2afac950d0-kube-api-access-w7s7m\") pod \"nova-api-0\" (UID: \"6fcad265-b82c-400e-afce-ac2afac950d0\") " pod="openstack/nova-api-0" Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.192233 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.476281 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd90484c-5dee-4a46-ad7c-60c82cf285e4" path="/var/lib/kubelet/pods/cd90484c-5dee-4a46-ad7c-60c82cf285e4/volumes" Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.648953 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 17:23:01 crc kubenswrapper[4760]: W1124 17:23:01.652232 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6fcad265_b82c_400e_afce_ac2afac950d0.slice/crio-0b2e72c4856f4fd3fd12974792d6f3968ed594661106b80428a7ec626dc53f3d WatchSource:0}: Error finding container 0b2e72c4856f4fd3fd12974792d6f3968ed594661106b80428a7ec626dc53f3d: Status 404 returned error can't find the container with id 0b2e72c4856f4fd3fd12974792d6f3968ed594661106b80428a7ec626dc53f3d Nov 24 17:23:01 crc kubenswrapper[4760]: I1124 17:23:01.800175 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6fcad265-b82c-400e-afce-ac2afac950d0","Type":"ContainerStarted","Data":"0b2e72c4856f4fd3fd12974792d6f3968ed594661106b80428a7ec626dc53f3d"} Nov 24 17:23:02 crc kubenswrapper[4760]: I1124 17:23:02.301628 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-69f4488969-xwpx8" podUID="37d3f873-9ed8-47d6-b62d-3b007dca3936" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 24 17:23:02 crc kubenswrapper[4760]: I1124 17:23:02.812310 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6fcad265-b82c-400e-afce-ac2afac950d0","Type":"ContainerStarted","Data":"09b0396391bc7a0a590951c802b58a226451cecf984f00b370f59e053a6357b2"} Nov 24 17:23:02 crc kubenswrapper[4760]: I1124 17:23:02.812371 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6fcad265-b82c-400e-afce-ac2afac950d0","Type":"ContainerStarted","Data":"8daf701f4289f3cee7a0e41f235aebecbb0e2861ed916ca21559fe7a497b37a5"} Nov 24 17:23:02 crc kubenswrapper[4760]: I1124 17:23:02.846091 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.846060103 podStartE2EDuration="2.846060103s" podCreationTimestamp="2025-11-24 17:23:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:23:02.836322174 +0000 UTC m=+1178.159203764" watchObservedRunningTime="2025-11-24 17:23:02.846060103 +0000 UTC m=+1178.168941693" Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.146852 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.194:8775/\": read tcp 10.217.0.2:38566->10.217.0.194:8775: read: connection reset by peer" Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.147356 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.194:8775/\": read tcp 10.217.0.2:38568->10.217.0.194:8775: read: connection reset by peer" Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.739515 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-combined-ca-bundle\") pod \"4b6228d3-58e5-48e3-b881-9381015853b3\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") "
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.739577 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-nova-metadata-tls-certs\") pod \"4b6228d3-58e5-48e3-b881-9381015853b3\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") "
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.739625 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9w5vt\" (UniqueName: \"kubernetes.io/projected/4b6228d3-58e5-48e3-b881-9381015853b3-kube-api-access-9w5vt\") pod \"4b6228d3-58e5-48e3-b881-9381015853b3\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") "
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.739658 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b6228d3-58e5-48e3-b881-9381015853b3-logs\") pod \"4b6228d3-58e5-48e3-b881-9381015853b3\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") "
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.739698 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-config-data\") pod \"4b6228d3-58e5-48e3-b881-9381015853b3\" (UID: \"4b6228d3-58e5-48e3-b881-9381015853b3\") "
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.740717 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b6228d3-58e5-48e3-b881-9381015853b3-logs" (OuterVolumeSpecName: "logs") pod "4b6228d3-58e5-48e3-b881-9381015853b3" (UID: "4b6228d3-58e5-48e3-b881-9381015853b3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.747361 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b6228d3-58e5-48e3-b881-9381015853b3-kube-api-access-9w5vt" (OuterVolumeSpecName: "kube-api-access-9w5vt") pod "4b6228d3-58e5-48e3-b881-9381015853b3" (UID: "4b6228d3-58e5-48e3-b881-9381015853b3"). InnerVolumeSpecName "kube-api-access-9w5vt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.768144 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-config-data" (OuterVolumeSpecName: "config-data") pod "4b6228d3-58e5-48e3-b881-9381015853b3" (UID: "4b6228d3-58e5-48e3-b881-9381015853b3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.772615 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b6228d3-58e5-48e3-b881-9381015853b3" (UID: "4b6228d3-58e5-48e3-b881-9381015853b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.811234 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "4b6228d3-58e5-48e3-b881-9381015853b3" (UID: "4b6228d3-58e5-48e3-b881-9381015853b3"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.822702 4760 generic.go:334] "Generic (PLEG): container finished" podID="4b6228d3-58e5-48e3-b881-9381015853b3" containerID="5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94" exitCode=0
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.822762 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.822766 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b6228d3-58e5-48e3-b881-9381015853b3","Type":"ContainerDied","Data":"5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94"}
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.823518 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b6228d3-58e5-48e3-b881-9381015853b3","Type":"ContainerDied","Data":"95b650c14e2292bc9365b56e5beea9fa6c447288eda2d88d307ecf8ca4bd6128"}
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.823537 4760 scope.go:117] "RemoveContainer" containerID="5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.842646 4760 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.842675 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9w5vt\" (UniqueName: \"kubernetes.io/projected/4b6228d3-58e5-48e3-b881-9381015853b3-kube-api-access-9w5vt\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.842686 4760 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b6228d3-58e5-48e3-b881-9381015853b3-logs\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.842695 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.842706 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b6228d3-58e5-48e3-b881-9381015853b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.855623 4760 scope.go:117] "RemoveContainer" containerID="9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.860573 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.870347 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.879644 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:23:03 crc kubenswrapper[4760]: E1124 17:23:03.880038 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-metadata"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.880055 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-metadata"
Nov 24 17:23:03 crc kubenswrapper[4760]: E1124 17:23:03.880068 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-log"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.880074 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-log"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.880244 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-metadata"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.880266 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" containerName="nova-metadata-log"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.881197 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.883530 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.883612 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.884731 4760 scope.go:117] "RemoveContainer" containerID="5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94"
Nov 24 17:23:03 crc kubenswrapper[4760]: E1124 17:23:03.887090 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94\": container with ID starting with 5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94 not found: ID does not exist" containerID="5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.887153 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94"} err="failed to get container status \"5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94\": rpc error: code = NotFound desc = could not find container \"5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94\": container with ID starting with 5f6f7ae24a4969e8b977f89360946698997fbfbc6891e5599df12c4ac5b14f94 not found: ID does not exist"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.887177 4760 scope.go:117] "RemoveContainer" containerID="9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812"
Nov 24 17:23:03 crc kubenswrapper[4760]: E1124 17:23:03.887539 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812\": container with ID starting with 9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812 not found: ID does not exist" containerID="9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.887563 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812"} err="failed to get container status \"9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812\": rpc error: code = NotFound desc = could not find container \"9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812\": container with ID starting with 9e962faf72fbc2885001c221640718cd648807af3a28fad615916cc69077a812 not found: ID does not exist"
Nov 24 17:23:03 crc kubenswrapper[4760]: I1124 17:23:03.891936 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.047315 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa127b75-3942-4fba-815f-197979d77117-logs\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0"
Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.047395 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa127b75-3942-4fba-815f-197979d77117-config-data\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0"
\"config-data\" (UniqueName: \"kubernetes.io/secret/fa127b75-3942-4fba-815f-197979d77117-config-data\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.047420 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa127b75-3942-4fba-815f-197979d77117-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.047441 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa127b75-3942-4fba-815f-197979d77117-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.047682 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkjdn\" (UniqueName: \"kubernetes.io/projected/fa127b75-3942-4fba-815f-197979d77117-kube-api-access-zkjdn\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.149654 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa127b75-3942-4fba-815f-197979d77117-logs\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.149726 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa127b75-3942-4fba-815f-197979d77117-config-data\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.149749 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa127b75-3942-4fba-815f-197979d77117-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.149769 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa127b75-3942-4fba-815f-197979d77117-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.149816 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkjdn\" (UniqueName: \"kubernetes.io/projected/fa127b75-3942-4fba-815f-197979d77117-kube-api-access-zkjdn\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.150199 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa127b75-3942-4fba-815f-197979d77117-logs\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 
17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.153903 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa127b75-3942-4fba-815f-197979d77117-config-data\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.153953 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa127b75-3942-4fba-815f-197979d77117-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.156671 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa127b75-3942-4fba-815f-197979d77117-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.173567 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkjdn\" (UniqueName: \"kubernetes.io/projected/fa127b75-3942-4fba-815f-197979d77117-kube-api-access-zkjdn\") pod \"nova-metadata-0\" (UID: \"fa127b75-3942-4fba-815f-197979d77117\") " pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.206549 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.691446 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.811235 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.854265 4760 generic.go:334] "Generic (PLEG): container finished" podID="57ccb0e4-90b5-4029-b30f-4eb9973fa389" containerID="4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584" exitCode=0 Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.854349 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"57ccb0e4-90b5-4029-b30f-4eb9973fa389","Type":"ContainerDied","Data":"4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584"} Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.854394 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"57ccb0e4-90b5-4029-b30f-4eb9973fa389","Type":"ContainerDied","Data":"1278d351fd84805ea4b0b99a909f8eaa4f3682661b3a58b83230a5963a195201"} Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.854415 4760 scope.go:117] "RemoveContainer" containerID="4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.855622 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.856162 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fa127b75-3942-4fba-815f-197979d77117","Type":"ContainerStarted","Data":"49ee91d8c35d32ae38fdc71c1513caf02ff3cdd6a26fb4ee1bbc91261f544266"} Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.863748 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6w5cr\" (UniqueName: \"kubernetes.io/projected/57ccb0e4-90b5-4029-b30f-4eb9973fa389-kube-api-access-6w5cr\") pod \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.863935 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-combined-ca-bundle\") pod \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.864241 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-config-data\") pod \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\" (UID: \"57ccb0e4-90b5-4029-b30f-4eb9973fa389\") " Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.872586 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57ccb0e4-90b5-4029-b30f-4eb9973fa389-kube-api-access-6w5cr" (OuterVolumeSpecName: "kube-api-access-6w5cr") pod "57ccb0e4-90b5-4029-b30f-4eb9973fa389" (UID: "57ccb0e4-90b5-4029-b30f-4eb9973fa389"). InnerVolumeSpecName "kube-api-access-6w5cr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.888342 4760 scope.go:117] "RemoveContainer" containerID="4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584" Nov 24 17:23:04 crc kubenswrapper[4760]: E1124 17:23:04.888842 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584\": container with ID starting with 4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584 not found: ID does not exist" containerID="4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.888886 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584"} err="failed to get container status \"4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584\": rpc error: code = NotFound desc = could not find container \"4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584\": container with ID starting with 4ce58a6801a7824522473be7b266cb8c614828cc8cdcd2c2c3b72f34bbd70584 not found: ID does not exist" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.898554 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57ccb0e4-90b5-4029-b30f-4eb9973fa389" (UID: "57ccb0e4-90b5-4029-b30f-4eb9973fa389"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.900655 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-config-data" (OuterVolumeSpecName: "config-data") pod "57ccb0e4-90b5-4029-b30f-4eb9973fa389" (UID: "57ccb0e4-90b5-4029-b30f-4eb9973fa389"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.966750 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6w5cr\" (UniqueName: \"kubernetes.io/projected/57ccb0e4-90b5-4029-b30f-4eb9973fa389-kube-api-access-6w5cr\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.967069 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:04 crc kubenswrapper[4760]: I1124 17:23:04.967080 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57ccb0e4-90b5-4029-b30f-4eb9973fa389-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.185351 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.194422 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.209163 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:23:05 crc kubenswrapper[4760]: E1124 17:23:05.209869 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57ccb0e4-90b5-4029-b30f-4eb9973fa389" containerName="nova-scheduler-scheduler" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.209977 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="57ccb0e4-90b5-4029-b30f-4eb9973fa389" containerName="nova-scheduler-scheduler" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.210293 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="57ccb0e4-90b5-4029-b30f-4eb9973fa389" containerName="nova-scheduler-scheduler" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.211362 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.213173 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.219104 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.272186 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f54be0fa-3248-4732-b118-546367054335-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f54be0fa-3248-4732-b118-546367054335\") " pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.272274 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx4fh\" (UniqueName: \"kubernetes.io/projected/f54be0fa-3248-4732-b118-546367054335-kube-api-access-rx4fh\") pod \"nova-scheduler-0\" (UID: \"f54be0fa-3248-4732-b118-546367054335\") " pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.272457 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f54be0fa-3248-4732-b118-546367054335-config-data\") pod \"nova-scheduler-0\" (UID: \"f54be0fa-3248-4732-b118-546367054335\") " pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.373887 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f54be0fa-3248-4732-b118-546367054335-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f54be0fa-3248-4732-b118-546367054335\") " pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.373990 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx4fh\" (UniqueName: \"kubernetes.io/projected/f54be0fa-3248-4732-b118-546367054335-kube-api-access-rx4fh\") pod \"nova-scheduler-0\" (UID: \"f54be0fa-3248-4732-b118-546367054335\") " pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.374121 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f54be0fa-3248-4732-b118-546367054335-config-data\") pod \"nova-scheduler-0\" (UID: \"f54be0fa-3248-4732-b118-546367054335\") " pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.378677 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f54be0fa-3248-4732-b118-546367054335-config-data\") pod \"nova-scheduler-0\" (UID: \"f54be0fa-3248-4732-b118-546367054335\") " pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.386784 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f54be0fa-3248-4732-b118-546367054335-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f54be0fa-3248-4732-b118-546367054335\") " pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.391298 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx4fh\" (UniqueName: 
\"kubernetes.io/projected/f54be0fa-3248-4732-b118-546367054335-kube-api-access-rx4fh\") pod \"nova-scheduler-0\" (UID: \"f54be0fa-3248-4732-b118-546367054335\") " pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.476503 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b6228d3-58e5-48e3-b881-9381015853b3" path="/var/lib/kubelet/pods/4b6228d3-58e5-48e3-b881-9381015853b3/volumes" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.477082 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57ccb0e4-90b5-4029-b30f-4eb9973fa389" path="/var/lib/kubelet/pods/57ccb0e4-90b5-4029-b30f-4eb9973fa389/volumes" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.604398 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.642415 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.642483 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.869216 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fa127b75-3942-4fba-815f-197979d77117","Type":"ContainerStarted","Data":"f151ca38ab829ee3f3f289e40e763f2670cf280104c0f3f7d914bedbf9edcf21"} Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.869637 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fa127b75-3942-4fba-815f-197979d77117","Type":"ContainerStarted","Data":"71a0fb4f08579205499c441b299f87b1b7066cc9a8afc11510c57f67630ea64e"} Nov 24 17:23:05 crc kubenswrapper[4760]: I1124 17:23:05.891317 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.89129898 podStartE2EDuration="2.89129898s" podCreationTimestamp="2025-11-24 17:23:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:23:05.89025325 +0000 UTC m=+1181.213134820" watchObservedRunningTime="2025-11-24 17:23:05.89129898 +0000 UTC m=+1181.214180540" Nov 24 17:23:06 crc kubenswrapper[4760]: I1124 17:23:06.059304 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 17:23:06 crc kubenswrapper[4760]: W1124 17:23:06.070788 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf54be0fa_3248_4732_b118_546367054335.slice/crio-6ec8ceb86aaa3481ab7ecaf62660d36d550083f26302b89e0ac3c7c75cc54140 WatchSource:0}: Error finding container 6ec8ceb86aaa3481ab7ecaf62660d36d550083f26302b89e0ac3c7c75cc54140: Status 404 returned error can't find the container with id 6ec8ceb86aaa3481ab7ecaf62660d36d550083f26302b89e0ac3c7c75cc54140 Nov 24 17:23:06 crc kubenswrapper[4760]: I1124 17:23:06.883935 4760 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f54be0fa-3248-4732-b118-546367054335","Type":"ContainerStarted","Data":"ffc436657f0206b33ecef41e42fb7eb79cb55988f13823764c1733704133dbb4"} Nov 24 17:23:06 crc kubenswrapper[4760]: I1124 17:23:06.884399 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f54be0fa-3248-4732-b118-546367054335","Type":"ContainerStarted","Data":"6ec8ceb86aaa3481ab7ecaf62660d36d550083f26302b89e0ac3c7c75cc54140"} Nov 24 17:23:06 crc kubenswrapper[4760]: I1124 17:23:06.928230 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.9281960599999999 podStartE2EDuration="1.92819606s" podCreationTimestamp="2025-11-24 17:23:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:23:06.909166795 +0000 UTC m=+1182.232048395" watchObservedRunningTime="2025-11-24 17:23:06.92819606 +0000 UTC m=+1182.251077650" Nov 24 17:23:09 crc kubenswrapper[4760]: I1124 17:23:09.206751 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 17:23:09 crc kubenswrapper[4760]: I1124 17:23:09.207211 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 17:23:10 crc kubenswrapper[4760]: I1124 17:23:10.604791 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 24 17:23:11 crc kubenswrapper[4760]: I1124 17:23:11.193297 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 17:23:11 crc kubenswrapper[4760]: I1124 17:23:11.193789 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 17:23:12 crc kubenswrapper[4760]: I1124 17:23:12.208161 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6fcad265-b82c-400e-afce-ac2afac950d0" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 17:23:12 crc kubenswrapper[4760]: I1124 17:23:12.208365 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6fcad265-b82c-400e-afce-ac2afac950d0" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 17:23:14 crc kubenswrapper[4760]: I1124 17:23:14.207416 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 24 17:23:14 crc kubenswrapper[4760]: I1124 17:23:14.207787 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 24 17:23:15 crc kubenswrapper[4760]: I1124 17:23:15.226133 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="fa127b75-3942-4fba-815f-197979d77117" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 17:23:15 crc kubenswrapper[4760]: I1124 17:23:15.226207 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="fa127b75-3942-4fba-815f-197979d77117" 
containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 17:23:15 crc kubenswrapper[4760]: I1124 17:23:15.605372 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 24 17:23:15 crc kubenswrapper[4760]: I1124 17:23:15.648475 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 24 17:23:16 crc kubenswrapper[4760]: I1124 17:23:16.039568 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 24 17:23:18 crc kubenswrapper[4760]: I1124 17:23:18.039457 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 24 17:23:21 crc kubenswrapper[4760]: I1124 17:23:21.203581 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 24 17:23:21 crc kubenswrapper[4760]: I1124 17:23:21.204320 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 24 17:23:21 crc kubenswrapper[4760]: I1124 17:23:21.213812 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 24 17:23:21 crc kubenswrapper[4760]: I1124 17:23:21.215657 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 24 17:23:22 crc kubenswrapper[4760]: I1124 17:23:22.061849 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 24 17:23:22 crc kubenswrapper[4760]: I1124 17:23:22.070593 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 24 17:23:24 crc kubenswrapper[4760]: I1124 17:23:24.246060 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 17:23:24 crc kubenswrapper[4760]: I1124 17:23:24.252608 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 17:23:24 crc kubenswrapper[4760]: I1124 17:23:24.252922 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 17:23:25 crc kubenswrapper[4760]: I1124 17:23:25.106342 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 17:23:33 crc kubenswrapper[4760]: I1124 17:23:33.055854 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 17:23:33 crc kubenswrapper[4760]: I1124 17:23:33.881797 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 17:23:35 crc kubenswrapper[4760]: I1124 17:23:35.642386 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:23:35 crc kubenswrapper[4760]: I1124 17:23:35.643712 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" Nov 24 17:23:35 crc kubenswrapper[4760]: I1124 17:23:35.643821 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:23:35 crc kubenswrapper[4760]: I1124 17:23:35.644545 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"99978461206f93941fdae109daf9e539be0cb0e8e8e501e548ac8d9b42e27a5f"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:23:35 crc kubenswrapper[4760]: I1124 17:23:35.644688 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://99978461206f93941fdae109daf9e539be0cb0e8e8e501e548ac8d9b42e27a5f" gracePeriod=600 Nov 24 17:23:36 crc kubenswrapper[4760]: I1124 17:23:36.207262 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="99978461206f93941fdae109daf9e539be0cb0e8e8e501e548ac8d9b42e27a5f" exitCode=0 Nov 24 17:23:36 crc kubenswrapper[4760]: I1124 17:23:36.207352 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"99978461206f93941fdae109daf9e539be0cb0e8e8e501e548ac8d9b42e27a5f"} Nov 24 17:23:36 crc kubenswrapper[4760]: I1124 17:23:36.207541 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"18e435d6f99820c29c6f9c48bc4be513e915a9521aaac2361f225513240d4796"} Nov 24 17:23:36 crc kubenswrapper[4760]: I1124 17:23:36.207562 4760 scope.go:117] "RemoveContainer" containerID="dfd774042184cc119075d4a563b6ff781e5839c1eacbc702f706225028bd27c8" Nov 24 17:23:37 crc kubenswrapper[4760]: I1124 17:23:37.115455 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="3a459f6d-ed01-4235-9062-4deb6ac9ccec" containerName="rabbitmq" containerID="cri-o://4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43" gracePeriod=604796 Nov 24 17:23:37 crc kubenswrapper[4760]: I1124 17:23:37.923531 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="9d3132aa-0715-4d60-840c-fca7d6fef37c" containerName="rabbitmq" containerID="cri-o://53b70a1d427266be2a542010ea82a8dcd92baf006ad471459697a2de8d524fb6" gracePeriod=604796 Nov 24 17:23:38 crc kubenswrapper[4760]: I1124 17:23:38.109333 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="3a459f6d-ed01-4235-9062-4deb6ac9ccec" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Nov 24 17:23:38 crc kubenswrapper[4760]: I1124 17:23:38.508917 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="9d3132aa-0715-4d60-840c-fca7d6fef37c" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.100:5671: connect: connection refused" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 
17:23:43.712657 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864594 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-confd\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864685 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55qbx\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-kube-api-access-55qbx\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864708 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-plugins\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864743 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3a459f6d-ed01-4235-9062-4deb6ac9ccec-pod-info\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864772 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-server-conf\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864823 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-plugins-conf\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864851 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-tls\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864873 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-config-data\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864925 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3a459f6d-ed01-4235-9062-4deb6ac9ccec-erlang-cookie-secret\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864952 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-erlang-cookie\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.864968 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\" (UID: \"3a459f6d-ed01-4235-9062-4deb6ac9ccec\") " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.865995 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.870351 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.871276 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.875156 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "persistence") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.875167 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a459f6d-ed01-4235-9062-4deb6ac9ccec-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.875238 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-kube-api-access-55qbx" (OuterVolumeSpecName: "kube-api-access-55qbx") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). InnerVolumeSpecName "kube-api-access-55qbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.875577 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/3a459f6d-ed01-4235-9062-4deb6ac9ccec-pod-info" (OuterVolumeSpecName: "pod-info") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). 
InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.877151 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.914476 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-config-data" (OuterVolumeSpecName: "config-data") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.927964 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-server-conf" (OuterVolumeSpecName: "server-conf") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.966685 4760 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-server-conf\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.966724 4760 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.966738 4760 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.966748 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a459f6d-ed01-4235-9062-4deb6ac9ccec-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.966760 4760 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3a459f6d-ed01-4235-9062-4deb6ac9ccec-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.966774 4760 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.966807 4760 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.966820 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55qbx\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-kube-api-access-55qbx\") on node \"crc\" DevicePath \"\"" Nov 24 
17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.966831 4760 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.966845 4760 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3a459f6d-ed01-4235-9062-4deb6ac9ccec-pod-info\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:43 crc kubenswrapper[4760]: I1124 17:23:43.991028 4760 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.010237 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "3a459f6d-ed01-4235-9062-4deb6ac9ccec" (UID: "3a459f6d-ed01-4235-9062-4deb6ac9ccec"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.068269 4760 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.068305 4760 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3a459f6d-ed01-4235-9062-4deb6ac9ccec-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.284871 4760 generic.go:334] "Generic (PLEG): container finished" podID="9d3132aa-0715-4d60-840c-fca7d6fef37c" containerID="53b70a1d427266be2a542010ea82a8dcd92baf006ad471459697a2de8d524fb6" exitCode=0 Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.285029 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9d3132aa-0715-4d60-840c-fca7d6fef37c","Type":"ContainerDied","Data":"53b70a1d427266be2a542010ea82a8dcd92baf006ad471459697a2de8d524fb6"} Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.287558 4760 generic.go:334] "Generic (PLEG): container finished" podID="3a459f6d-ed01-4235-9062-4deb6ac9ccec" containerID="4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43" exitCode=0 Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.287598 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a459f6d-ed01-4235-9062-4deb6ac9ccec","Type":"ContainerDied","Data":"4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43"} Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.287625 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a459f6d-ed01-4235-9062-4deb6ac9ccec","Type":"ContainerDied","Data":"0cf61669b8e9c7b907848d7b48ca1faeb074a03ce6f11b16c73bddb014cbfeb0"} Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.287642 4760 scope.go:117] "RemoveContainer" containerID="4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.287796 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.333293 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.337316 4760 scope.go:117] "RemoveContainer" containerID="694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.346568 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.367198 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 17:23:44 crc kubenswrapper[4760]: E1124 17:23:44.367777 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a459f6d-ed01-4235-9062-4deb6ac9ccec" containerName="rabbitmq" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.367795 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a459f6d-ed01-4235-9062-4deb6ac9ccec" containerName="rabbitmq" Nov 24 17:23:44 crc kubenswrapper[4760]: E1124 17:23:44.367806 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a459f6d-ed01-4235-9062-4deb6ac9ccec" containerName="setup-container" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.367816 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a459f6d-ed01-4235-9062-4deb6ac9ccec" containerName="setup-container" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.368612 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a459f6d-ed01-4235-9062-4deb6ac9ccec" containerName="rabbitmq" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.370264 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.375371 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.375381 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.375566 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-jszzd" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.375628 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.375703 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.375969 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.376329 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.378389 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.418028 4760 scope.go:117] "RemoveContainer" containerID="4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43" Nov 24 17:23:44 crc kubenswrapper[4760]: E1124 17:23:44.418612 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43\": container with ID starting with 4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43 not found: ID does not exist" containerID="4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.418661 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43"} err="failed to get container status \"4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43\": rpc error: code = NotFound desc = could not find container \"4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43\": container with ID starting with 4a288b9ee52cae636d8848da3a228bdfcffa52a443b119284d32c3150819ca43 not found: ID does not exist" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.418691 4760 scope.go:117] "RemoveContainer" containerID="694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4" Nov 24 17:23:44 crc kubenswrapper[4760]: E1124 17:23:44.423373 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4\": container with ID starting with 694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4 not found: ID does not exist" containerID="694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.423420 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4"} err="failed to get container status 
\"694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4\": rpc error: code = NotFound desc = could not find container \"694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4\": container with ID starting with 694d7a7b87fc5b3e3840b37d76216581033a503ef5513fefc0f0a262e875c2f4 not found: ID does not exist" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479574 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479623 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479649 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e89585e-aad9-485c-88af-2380cefb8b18-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479683 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479721 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxnqs\" (UniqueName: \"kubernetes.io/projected/8e89585e-aad9-485c-88af-2380cefb8b18-kube-api-access-qxnqs\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479748 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e89585e-aad9-485c-88af-2380cefb8b18-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479804 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479832 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e89585e-aad9-485c-88af-2380cefb8b18-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479876 4760 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e89585e-aad9-485c-88af-2380cefb8b18-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479914 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.479961 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e89585e-aad9-485c-88af-2380cefb8b18-config-data\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.541663 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582191 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582252 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582282 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e89585e-aad9-485c-88af-2380cefb8b18-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582369 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582414 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxnqs\" (UniqueName: \"kubernetes.io/projected/8e89585e-aad9-485c-88af-2380cefb8b18-kube-api-access-qxnqs\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582438 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e89585e-aad9-485c-88af-2380cefb8b18-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582494 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582522 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e89585e-aad9-485c-88af-2380cefb8b18-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582564 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e89585e-aad9-485c-88af-2380cefb8b18-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582603 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582650 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e89585e-aad9-485c-88af-2380cefb8b18-config-data\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.582775 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.583665 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e89585e-aad9-485c-88af-2380cefb8b18-config-data\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.583718 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e89585e-aad9-485c-88af-2380cefb8b18-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.583944 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.584372 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0" Nov 24 17:23:44 
crc kubenswrapper[4760]: I1124 17:23:44.589770 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.590737 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e89585e-aad9-485c-88af-2380cefb8b18-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.596028 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e89585e-aad9-485c-88af-2380cefb8b18-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.598138 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e89585e-aad9-485c-88af-2380cefb8b18-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.599504 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e89585e-aad9-485c-88af-2380cefb8b18-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.624199 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxnqs\" (UniqueName: \"kubernetes.io/projected/8e89585e-aad9-485c-88af-2380cefb8b18-kube-api-access-qxnqs\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.632917 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"8e89585e-aad9-485c-88af-2380cefb8b18\") " pod="openstack/rabbitmq-server-0"
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.683855 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-erlang-cookie\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.683922 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-confd\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.683973 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-plugins-conf\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684016 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-config-data\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684057 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4sb8\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-kube-api-access-g4sb8\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684082 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-tls\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684117 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9d3132aa-0715-4d60-840c-fca7d6fef37c-erlang-cookie-secret\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684156 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9d3132aa-0715-4d60-840c-fca7d6fef37c-pod-info\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684256 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-plugins\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684281 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-server-conf\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684310 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"9d3132aa-0715-4d60-840c-fca7d6fef37c\" (UID: \"9d3132aa-0715-4d60-840c-fca7d6fef37c\") "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684492 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684547 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.684855 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.685184 4760 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.685207 4760 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-plugins-conf\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.685221 4760 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.688364 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.688384 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d3132aa-0715-4d60-840c-fca7d6fef37c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.689592 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.689840 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/9d3132aa-0715-4d60-840c-fca7d6fef37c-pod-info" (OuterVolumeSpecName: "pod-info") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.690986 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-kube-api-access-g4sb8" (OuterVolumeSpecName: "kube-api-access-g4sb8") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "kube-api-access-g4sb8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.716249 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-config-data" (OuterVolumeSpecName: "config-data") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.738684 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-server-conf" (OuterVolumeSpecName: "server-conf") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.751474 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.787238 4760 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" "
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.788989 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.789144 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4sb8\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-kube-api-access-g4sb8\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.789180 4760 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.789193 4760 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9d3132aa-0715-4d60-840c-fca7d6fef37c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.789205 4760 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9d3132aa-0715-4d60-840c-fca7d6fef37c-pod-info\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.789215 4760 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9d3132aa-0715-4d60-840c-fca7d6fef37c-server-conf\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.793568 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "9d3132aa-0715-4d60-840c-fca7d6fef37c" (UID: "9d3132aa-0715-4d60-840c-fca7d6fef37c"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.814985 4760 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc"
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.893846 4760 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:44 crc kubenswrapper[4760]: I1124 17:23:44.893906 4760 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9d3132aa-0715-4d60-840c-fca7d6fef37c-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.205115 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.300518 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9d3132aa-0715-4d60-840c-fca7d6fef37c","Type":"ContainerDied","Data":"7e1a8f84326597411b1acf98e2d315900e1134e18b9f60fd3f868fe856e4d2d4"}
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.300589 4760 scope.go:117] "RemoveContainer" containerID="53b70a1d427266be2a542010ea82a8dcd92baf006ad471459697a2de8d524fb6"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.300630 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.304199 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8e89585e-aad9-485c-88af-2380cefb8b18","Type":"ContainerStarted","Data":"5314490bd39a838493e883297381f49e76144337e53e63adcba9d31a961ee263"}
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.332163 4760 scope.go:117] "RemoveContainer" containerID="ae3c541e7631907d00510787756cfe3edbe148432975656cb203a8e3db203fd5"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.481512 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a459f6d-ed01-4235-9062-4deb6ac9ccec" path="/var/lib/kubelet/pods/3a459f6d-ed01-4235-9062-4deb6ac9ccec/volumes"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.482374 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.482396 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.499597 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 24 17:23:45 crc kubenswrapper[4760]: E1124 17:23:45.500810 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3132aa-0715-4d60-840c-fca7d6fef37c" containerName="setup-container"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.500837 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3132aa-0715-4d60-840c-fca7d6fef37c" containerName="setup-container"
Nov 24 17:23:45 crc kubenswrapper[4760]: E1124 17:23:45.500851 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3132aa-0715-4d60-840c-fca7d6fef37c" containerName="rabbitmq"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.500860 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3132aa-0715-4d60-840c-fca7d6fef37c" containerName="rabbitmq"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.501114 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d3132aa-0715-4d60-840c-fca7d6fef37c" containerName="rabbitmq"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.503539 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.509226 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.509478 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.509770 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.509913 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.510791 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-5x4sv"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.512574 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.513198 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.514255 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.607896 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.608069 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.608118 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.608141 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b8dd252d-07db-4037-b8c0-09ca191d9f56-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.608173 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b8dd252d-07db-4037-b8c0-09ca191d9f56-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.608226 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5c9h\" (UniqueName: \"kubernetes.io/projected/b8dd252d-07db-4037-b8c0-09ca191d9f56-kube-api-access-l5c9h\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.608259 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b8dd252d-07db-4037-b8c0-09ca191d9f56-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.608315 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.608387 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b8dd252d-07db-4037-b8c0-09ca191d9f56-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.608411 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.608526 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b8dd252d-07db-4037-b8c0-09ca191d9f56-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710055 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710117 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710151 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b8dd252d-07db-4037-b8c0-09ca191d9f56-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710182 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b8dd252d-07db-4037-b8c0-09ca191d9f56-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710209 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5c9h\" (UniqueName: \"kubernetes.io/projected/b8dd252d-07db-4037-b8c0-09ca191d9f56-kube-api-access-l5c9h\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710230 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b8dd252d-07db-4037-b8c0-09ca191d9f56-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710276 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710313 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b8dd252d-07db-4037-b8c0-09ca191d9f56-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710334 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710401 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b8dd252d-07db-4037-b8c0-09ca191d9f56-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.710440 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.711889 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.712409 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b8dd252d-07db-4037-b8c0-09ca191d9f56-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.712773 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.712870 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b8dd252d-07db-4037-b8c0-09ca191d9f56-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.713148 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b8dd252d-07db-4037-b8c0-09ca191d9f56-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.714123 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.716945 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.719247 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b8dd252d-07db-4037-b8c0-09ca191d9f56-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.719337 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b8dd252d-07db-4037-b8c0-09ca191d9f56-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.719751 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b8dd252d-07db-4037-b8c0-09ca191d9f56-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.729391 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5c9h\" (UniqueName: \"kubernetes.io/projected/b8dd252d-07db-4037-b8c0-09ca191d9f56-kube-api-access-l5c9h\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.752477 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b8dd252d-07db-4037-b8c0-09ca191d9f56\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:45 crc kubenswrapper[4760]: I1124 17:23:45.824537 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 17:23:46 crc kubenswrapper[4760]: I1124 17:23:46.280848 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 24 17:23:46 crc kubenswrapper[4760]: W1124 17:23:46.335879 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8dd252d_07db_4037_b8c0_09ca191d9f56.slice/crio-8323a31a823bd16ebd7e0efef1dcd6e05248392d159f2d3ca0d021498dc557f0 WatchSource:0}: Error finding container 8323a31a823bd16ebd7e0efef1dcd6e05248392d159f2d3ca0d021498dc557f0: Status 404 returned error can't find the container with id 8323a31a823bd16ebd7e0efef1dcd6e05248392d159f2d3ca0d021498dc557f0
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.038473 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"]
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.040215 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.044647 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.092998 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"]
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.136348 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-config\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.136784 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.136818 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9jd5\" (UniqueName: \"kubernetes.io/projected/3b86828c-25b0-4cd3-80d7-39a031f52017-kube-api-access-q9jd5\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.136866 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.136905 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.136936 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.136960 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.239233 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.239568 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-config\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.239738 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.239878 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9jd5\" (UniqueName: \"kubernetes.io/projected/3b86828c-25b0-4cd3-80d7-39a031f52017-kube-api-access-q9jd5\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.240118 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.241046 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.241781 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.240677 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.240956 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.240490 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-config\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.241702 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.240487 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.242584 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.258156 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9jd5\" (UniqueName: \"kubernetes.io/projected/3b86828c-25b0-4cd3-80d7-39a031f52017-kube-api-access-q9jd5\") pod \"dnsmasq-dns-79bd4cc8c9-6jmqn\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.336702 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8e89585e-aad9-485c-88af-2380cefb8b18","Type":"ContainerStarted","Data":"cc3bc3fc123bf56f6cffd8e642ef512b6f622c26be48b4299701d13cdc0dd189"}
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.338383 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b8dd252d-07db-4037-b8c0-09ca191d9f56","Type":"ContainerStarted","Data":"8323a31a823bd16ebd7e0efef1dcd6e05248392d159f2d3ca0d021498dc557f0"}
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.376575 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.488993 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d3132aa-0715-4d60-840c-fca7d6fef37c" path="/var/lib/kubelet/pods/9d3132aa-0715-4d60-840c-fca7d6fef37c/volumes"
Nov 24 17:23:47 crc kubenswrapper[4760]: I1124 17:23:47.867448 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"]
Nov 24 17:23:48 crc kubenswrapper[4760]: I1124 17:23:48.347976 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b8dd252d-07db-4037-b8c0-09ca191d9f56","Type":"ContainerStarted","Data":"2e72899239a6fc08b26cb6790bf7ad91896591d3c5e7fb7857ac42f0b9c84dc6"}
Nov 24 17:23:48 crc kubenswrapper[4760]: I1124 17:23:48.349625 4760 generic.go:334] "Generic (PLEG): container finished" podID="3b86828c-25b0-4cd3-80d7-39a031f52017" containerID="25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07" exitCode=0
Nov 24 17:23:48 crc kubenswrapper[4760]: I1124 17:23:48.349671 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn" event={"ID":"3b86828c-25b0-4cd3-80d7-39a031f52017","Type":"ContainerDied","Data":"25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07"}
Nov 24 17:23:48 crc kubenswrapper[4760]: I1124 17:23:48.349693 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn" event={"ID":"3b86828c-25b0-4cd3-80d7-39a031f52017","Type":"ContainerStarted","Data":"b6bb1d1561f83712d1cb6dcd60fdb8f7c5a704c9694981d71dcc49a12dff1c80"}
Nov 24 17:23:49 crc kubenswrapper[4760]: I1124 17:23:49.361112 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn" event={"ID":"3b86828c-25b0-4cd3-80d7-39a031f52017","Type":"ContainerStarted","Data":"362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259"}
Nov 24 17:23:49 crc kubenswrapper[4760]: I1124 17:23:49.383926 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn" podStartSLOduration=3.38391069 podStartE2EDuration="3.38391069s" podCreationTimestamp="2025-11-24 17:23:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:23:49.37904382 +0000 UTC m=+1224.701925370" watchObservedRunningTime="2025-11-24 17:23:49.38391069 +0000 UTC m=+1224.706792240"
Nov 24 17:23:50 crc kubenswrapper[4760]: I1124 17:23:50.370090 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.378204 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.454386 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-xr5zm"]
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.455578 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" podUID="6e587246-348f-4c91-96ce-88dd0beafac8" containerName="dnsmasq-dns" containerID="cri-o://e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde" gracePeriod=10
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.614979 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55478c4467-9frwn"]
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.616896 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.641537 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55478c4467-9frwn"]
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.758901 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-ovsdbserver-sb\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.758980 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-dns-swift-storage-0\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.759110 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-ovsdbserver-nb\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.759202 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-config\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.759237 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-openstack-edpm-ipam\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.759261 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-dns-svc\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.759332 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7672j\" (UniqueName: \"kubernetes.io/projected/bb018af8-7779-4386-8903-a1dfb982a26e-kube-api-access-7672j\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.861051 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-dns-svc\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.861498 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7672j\" (UniqueName: \"kubernetes.io/projected/bb018af8-7779-4386-8903-a1dfb982a26e-kube-api-access-7672j\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.861573 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-ovsdbserver-sb\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.861613 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-dns-swift-storage-0\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.861655 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-ovsdbserver-nb\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.861716 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-config\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.861742 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-openstack-edpm-ipam\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.864895 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-openstack-edpm-ipam\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.864942 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-ovsdbserver-nb\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.865410 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-dns-svc\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.865638 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-config\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.865816 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-ovsdbserver-sb\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.866070 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bb018af8-7779-4386-8903-a1dfb982a26e-dns-swift-storage-0\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.886998 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7672j\" (UniqueName: \"kubernetes.io/projected/bb018af8-7779-4386-8903-a1dfb982a26e-kube-api-access-7672j\") pod \"dnsmasq-dns-55478c4467-9frwn\" (UID: \"bb018af8-7779-4386-8903-a1dfb982a26e\") " pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:57 crc kubenswrapper[4760]: I1124 17:23:57.953127 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.082375 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm"
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.165719 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-svc\") pod \"6e587246-348f-4c91-96ce-88dd0beafac8\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") "
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.165819 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-config\") pod \"6e587246-348f-4c91-96ce-88dd0beafac8\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") "
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.165876 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-swift-storage-0\") pod \"6e587246-348f-4c91-96ce-88dd0beafac8\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") "
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.165963 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-sb\") pod \"6e587246-348f-4c91-96ce-88dd0beafac8\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") "
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.166042 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-nb\") pod \"6e587246-348f-4c91-96ce-88dd0beafac8\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") "
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.166093 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7z4x8\" (UniqueName: \"kubernetes.io/projected/6e587246-348f-4c91-96ce-88dd0beafac8-kube-api-access-7z4x8\") pod \"6e587246-348f-4c91-96ce-88dd0beafac8\" (UID: \"6e587246-348f-4c91-96ce-88dd0beafac8\") "
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.192728 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e587246-348f-4c91-96ce-88dd0beafac8-kube-api-access-7z4x8" (OuterVolumeSpecName: "kube-api-access-7z4x8") pod "6e587246-348f-4c91-96ce-88dd0beafac8" (UID: "6e587246-348f-4c91-96ce-88dd0beafac8"). InnerVolumeSpecName "kube-api-access-7z4x8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.243475 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6e587246-348f-4c91-96ce-88dd0beafac8" (UID: "6e587246-348f-4c91-96ce-88dd0beafac8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.245593 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6e587246-348f-4c91-96ce-88dd0beafac8" (UID: "6e587246-348f-4c91-96ce-88dd0beafac8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.245771 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-config" (OuterVolumeSpecName: "config") pod "6e587246-348f-4c91-96ce-88dd0beafac8" (UID: "6e587246-348f-4c91-96ce-88dd0beafac8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.250205 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6e587246-348f-4c91-96ce-88dd0beafac8" (UID: "6e587246-348f-4c91-96ce-88dd0beafac8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.250628 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6e587246-348f-4c91-96ce-88dd0beafac8" (UID: "6e587246-348f-4c91-96ce-88dd0beafac8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.268577 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.268839 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-config\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.268972 4760 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.269107 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.269196 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6e587246-348f-4c91-96ce-88dd0beafac8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.269286 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7z4x8\" (UniqueName: \"kubernetes.io/projected/6e587246-348f-4c91-96ce-88dd0beafac8-kube-api-access-7z4x8\") on node \"crc\" DevicePath \"\""
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.443417 4760 generic.go:334] "Generic (PLEG): container finished" podID="6e587246-348f-4c91-96ce-88dd0beafac8" containerID="e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde" exitCode=0
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.443510 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" event={"ID":"6e587246-348f-4c91-96ce-88dd0beafac8","Type":"ContainerDied","Data":"e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde"}
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.443542 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm"
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.443563 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-xr5zm" event={"ID":"6e587246-348f-4c91-96ce-88dd0beafac8","Type":"ContainerDied","Data":"7e2c27049b307dd16a8a5fc61a0430c0cf7d0ea6d775fba40659dcb29bfe4f92"}
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.443581 4760 scope.go:117] "RemoveContainer" containerID="e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde"
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.472360 4760 scope.go:117] "RemoveContainer" containerID="88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f"
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.485922 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-xr5zm"]
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.494589 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-xr5zm"]
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.512302 4760 scope.go:117] "RemoveContainer" containerID="e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde"
Nov 24 17:23:58 crc kubenswrapper[4760]: E1124 17:23:58.512832 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde\": container with ID starting with e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde not found: ID does not exist" containerID="e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde"
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.512908 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde"} err="failed to get container status \"e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde\": rpc error: code = NotFound desc = could not find container \"e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde\": container with ID starting with e93e6ebe14a999bcb7aa50e206bb74e20e329e2bced5281d92f3b21c2f57adde not found: ID does not exist"
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.512943 4760 scope.go:117] "RemoveContainer" containerID="88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f"
Nov 24 17:23:58 crc kubenswrapper[4760]: E1124 17:23:58.513267 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f\": container with ID starting with 88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f not found: ID does not exist" containerID="88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f"
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.513296 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f"} err="failed to get container status \"88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f\": rpc error: code = NotFound desc = could not find container \"88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f\": container with ID starting with 88fed744b5270d0e443a229e8746be9b013b8708c0e2233fa28de80acc4a9b7f not found: ID does not exist"
Nov 24 17:23:58 crc kubenswrapper[4760]: I1124 17:23:58.517694 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55478c4467-9frwn"]
Nov 24 17:23:58 crc kubenswrapper[4760]: W1124 17:23:58.526415 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb018af8_7779_4386_8903_a1dfb982a26e.slice/crio-811dd792767842dceef4f47979f1cc3d84b424709946708fdc5c734c9afd2647 WatchSource:0}: Error finding container 811dd792767842dceef4f47979f1cc3d84b424709946708fdc5c734c9afd2647: Status 404 returned error can't find the container with id 811dd792767842dceef4f47979f1cc3d84b424709946708fdc5c734c9afd2647
Nov 24 17:23:59 crc kubenswrapper[4760]: I1124 17:23:59.465779 4760 generic.go:334] "Generic (PLEG): container finished" podID="bb018af8-7779-4386-8903-a1dfb982a26e" containerID="f55a08a34bc8a7b5b98dc64fac312e8b8c81aca8b23387644b5fcb79fd100933" exitCode=0
Nov 24 17:23:59 crc kubenswrapper[4760]: I1124 17:23:59.499533 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e587246-348f-4c91-96ce-88dd0beafac8" path="/var/lib/kubelet/pods/6e587246-348f-4c91-96ce-88dd0beafac8/volumes"
Nov 24 17:23:59 crc kubenswrapper[4760]: I1124 17:23:59.500336 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55478c4467-9frwn" event={"ID":"bb018af8-7779-4386-8903-a1dfb982a26e","Type":"ContainerDied","Data":"f55a08a34bc8a7b5b98dc64fac312e8b8c81aca8b23387644b5fcb79fd100933"}
Nov 24 17:23:59 crc kubenswrapper[4760]: I1124 17:23:59.500379 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55478c4467-9frwn" event={"ID":"bb018af8-7779-4386-8903-a1dfb982a26e","Type":"ContainerStarted","Data":"811dd792767842dceef4f47979f1cc3d84b424709946708fdc5c734c9afd2647"}
Nov 24 17:24:00 crc kubenswrapper[4760]: I1124 17:24:00.481293 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55478c4467-9frwn" event={"ID":"bb018af8-7779-4386-8903-a1dfb982a26e","Type":"ContainerStarted","Data":"ff221f84216b1958862e30269b12083c5bb97d9dd8109cf838436994a8e3f3a4"}
Nov 24 17:24:00 crc kubenswrapper[4760]: I1124 17:24:00.481974 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:24:00 crc kubenswrapper[4760]: I1124 17:24:00.515768 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55478c4467-9frwn" podStartSLOduration=3.515748077 podStartE2EDuration="3.515748077s" podCreationTimestamp="2025-11-24 17:23:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:24:00.503756663 +0000 UTC m=+1235.826638283" watchObservedRunningTime="2025-11-24 17:24:00.515748077 +0000 UTC m=+1235.838629627"
Nov 24 17:24:07 crc kubenswrapper[4760]: I1124 17:24:07.954932 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55478c4467-9frwn"
Nov 24 17:24:08 crc kubenswrapper[4760]: I1124 17:24:08.039387 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"]
Nov 24 17:24:08 crc kubenswrapper[4760]: I1124 17:24:08.039671 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"
podUID="3b86828c-25b0-4cd3-80d7-39a031f52017" containerName="dnsmasq-dns" containerID="cri-o://362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259" gracePeriod=10 Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.055755 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.199074 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-sb\") pod \"3b86828c-25b0-4cd3-80d7-39a031f52017\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.199156 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9jd5\" (UniqueName: \"kubernetes.io/projected/3b86828c-25b0-4cd3-80d7-39a031f52017-kube-api-access-q9jd5\") pod \"3b86828c-25b0-4cd3-80d7-39a031f52017\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.199199 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-openstack-edpm-ipam\") pod \"3b86828c-25b0-4cd3-80d7-39a031f52017\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.199231 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-nb\") pod \"3b86828c-25b0-4cd3-80d7-39a031f52017\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.199274 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-swift-storage-0\") pod \"3b86828c-25b0-4cd3-80d7-39a031f52017\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.199304 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-svc\") pod \"3b86828c-25b0-4cd3-80d7-39a031f52017\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.199408 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-config\") pod \"3b86828c-25b0-4cd3-80d7-39a031f52017\" (UID: \"3b86828c-25b0-4cd3-80d7-39a031f52017\") " Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.206079 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b86828c-25b0-4cd3-80d7-39a031f52017-kube-api-access-q9jd5" (OuterVolumeSpecName: "kube-api-access-q9jd5") pod "3b86828c-25b0-4cd3-80d7-39a031f52017" (UID: "3b86828c-25b0-4cd3-80d7-39a031f52017"). InnerVolumeSpecName "kube-api-access-q9jd5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.265786 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3b86828c-25b0-4cd3-80d7-39a031f52017" (UID: "3b86828c-25b0-4cd3-80d7-39a031f52017"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.268136 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "3b86828c-25b0-4cd3-80d7-39a031f52017" (UID: "3b86828c-25b0-4cd3-80d7-39a031f52017"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.268597 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3b86828c-25b0-4cd3-80d7-39a031f52017" (UID: "3b86828c-25b0-4cd3-80d7-39a031f52017"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.271082 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3b86828c-25b0-4cd3-80d7-39a031f52017" (UID: "3b86828c-25b0-4cd3-80d7-39a031f52017"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.275161 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-config" (OuterVolumeSpecName: "config") pod "3b86828c-25b0-4cd3-80d7-39a031f52017" (UID: "3b86828c-25b0-4cd3-80d7-39a031f52017"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.276607 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3b86828c-25b0-4cd3-80d7-39a031f52017" (UID: "3b86828c-25b0-4cd3-80d7-39a031f52017"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.301603 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.301650 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9jd5\" (UniqueName: \"kubernetes.io/projected/3b86828c-25b0-4cd3-80d7-39a031f52017-kube-api-access-q9jd5\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.301669 4760 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.301682 4760 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.301694 4760 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.301706 4760 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.301718 4760 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b86828c-25b0-4cd3-80d7-39a031f52017-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.576963 4760 generic.go:334] "Generic (PLEG): container finished" podID="3b86828c-25b0-4cd3-80d7-39a031f52017" containerID="362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259" exitCode=0 Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.577060 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.577063 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn" event={"ID":"3b86828c-25b0-4cd3-80d7-39a031f52017","Type":"ContainerDied","Data":"362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259"} Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.577419 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-6jmqn" event={"ID":"3b86828c-25b0-4cd3-80d7-39a031f52017","Type":"ContainerDied","Data":"b6bb1d1561f83712d1cb6dcd60fdb8f7c5a704c9694981d71dcc49a12dff1c80"} Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.577438 4760 scope.go:117] "RemoveContainer" containerID="362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.605534 4760 scope.go:117] "RemoveContainer" containerID="25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.607606 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"] Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.615441 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-6jmqn"] Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.627241 4760 scope.go:117] "RemoveContainer" containerID="362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259" Nov 24 17:24:09 crc kubenswrapper[4760]: E1124 17:24:09.628238 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259\": container with ID starting with 362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259 not found: ID does not exist" containerID="362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.628300 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259"} err="failed to get container status \"362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259\": rpc error: code = NotFound desc = could not find container \"362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259\": container with ID starting with 362bd8c771ba2c30bd184419aebd7f1742de9b114b52c5e5e472ad56e70fd259 not found: ID does not exist" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.628331 4760 scope.go:117] "RemoveContainer" containerID="25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07" Nov 24 17:24:09 crc kubenswrapper[4760]: E1124 17:24:09.628642 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07\": container with ID starting with 25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07 not found: ID does not exist" containerID="25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07" Nov 24 17:24:09 crc kubenswrapper[4760]: I1124 17:24:09.628679 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07"} err="failed to get container status 
\"25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07\": rpc error: code = NotFound desc = could not find container \"25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07\": container with ID starting with 25accbefbfdb5bf219f299dbc0fd63dbf534318cdc58a9774acb4045c3d8de07 not found: ID does not exist" Nov 24 17:24:11 crc kubenswrapper[4760]: I1124 17:24:11.485941 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b86828c-25b0-4cd3-80d7-39a031f52017" path="/var/lib/kubelet/pods/3b86828c-25b0-4cd3-80d7-39a031f52017/volumes" Nov 24 17:24:18 crc kubenswrapper[4760]: I1124 17:24:18.679801 4760 generic.go:334] "Generic (PLEG): container finished" podID="8e89585e-aad9-485c-88af-2380cefb8b18" containerID="cc3bc3fc123bf56f6cffd8e642ef512b6f622c26be48b4299701d13cdc0dd189" exitCode=0 Nov 24 17:24:18 crc kubenswrapper[4760]: I1124 17:24:18.679850 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8e89585e-aad9-485c-88af-2380cefb8b18","Type":"ContainerDied","Data":"cc3bc3fc123bf56f6cffd8e642ef512b6f622c26be48b4299701d13cdc0dd189"} Nov 24 17:24:19 crc kubenswrapper[4760]: I1124 17:24:19.692731 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8e89585e-aad9-485c-88af-2380cefb8b18","Type":"ContainerStarted","Data":"ec125d358f25383b43239ae5bdccfb6b0161cfb5b4b996570b7eb2e510e3516c"} Nov 24 17:24:19 crc kubenswrapper[4760]: I1124 17:24:19.693397 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 24 17:24:19 crc kubenswrapper[4760]: I1124 17:24:19.720696 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=35.720681616 podStartE2EDuration="35.720681616s" podCreationTimestamp="2025-11-24 17:23:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:24:19.717090003 +0000 UTC m=+1255.039971593" watchObservedRunningTime="2025-11-24 17:24:19.720681616 +0000 UTC m=+1255.043563166" Nov 24 17:24:20 crc kubenswrapper[4760]: I1124 17:24:20.705692 4760 generic.go:334] "Generic (PLEG): container finished" podID="b8dd252d-07db-4037-b8c0-09ca191d9f56" containerID="2e72899239a6fc08b26cb6790bf7ad91896591d3c5e7fb7857ac42f0b9c84dc6" exitCode=0 Nov 24 17:24:20 crc kubenswrapper[4760]: I1124 17:24:20.705769 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b8dd252d-07db-4037-b8c0-09ca191d9f56","Type":"ContainerDied","Data":"2e72899239a6fc08b26cb6790bf7ad91896591d3c5e7fb7857ac42f0b9c84dc6"} Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.304020 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh"] Nov 24 17:24:21 crc kubenswrapper[4760]: E1124 17:24:21.305065 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b86828c-25b0-4cd3-80d7-39a031f52017" containerName="dnsmasq-dns" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.305088 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b86828c-25b0-4cd3-80d7-39a031f52017" containerName="dnsmasq-dns" Nov 24 17:24:21 crc kubenswrapper[4760]: E1124 17:24:21.305106 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e587246-348f-4c91-96ce-88dd0beafac8" containerName="dnsmasq-dns" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 
17:24:21.305114 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e587246-348f-4c91-96ce-88dd0beafac8" containerName="dnsmasq-dns" Nov 24 17:24:21 crc kubenswrapper[4760]: E1124 17:24:21.305123 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e587246-348f-4c91-96ce-88dd0beafac8" containerName="init" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.305131 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e587246-348f-4c91-96ce-88dd0beafac8" containerName="init" Nov 24 17:24:21 crc kubenswrapper[4760]: E1124 17:24:21.305162 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b86828c-25b0-4cd3-80d7-39a031f52017" containerName="init" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.305169 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b86828c-25b0-4cd3-80d7-39a031f52017" containerName="init" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.305402 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e587246-348f-4c91-96ce-88dd0beafac8" containerName="dnsmasq-dns" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.305425 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b86828c-25b0-4cd3-80d7-39a031f52017" containerName="dnsmasq-dns" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.306183 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.310845 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.311187 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.311351 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.311465 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.315090 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh"] Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.448837 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.449222 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc5jt\" (UniqueName: \"kubernetes.io/projected/bf199ff4-4624-4608-8b45-72a6f1437473-kube-api-access-zc5jt\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.449388 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.449581 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.551478 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.551587 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc5jt\" (UniqueName: \"kubernetes.io/projected/bf199ff4-4624-4608-8b45-72a6f1437473-kube-api-access-zc5jt\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.551640 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.551716 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.555812 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.557728 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.570061 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-ssh-key\") 
pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.571986 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zc5jt\" (UniqueName: \"kubernetes.io/projected/bf199ff4-4624-4608-8b45-72a6f1437473-kube-api-access-zc5jt\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.650727 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.737770 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b8dd252d-07db-4037-b8c0-09ca191d9f56","Type":"ContainerStarted","Data":"e9821748d510a80b587f5023b55528935e180073508fa5f8566919eacc26434a"} Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.738076 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 24 17:24:21 crc kubenswrapper[4760]: I1124 17:24:21.775477 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.775454327 podStartE2EDuration="36.775454327s" podCreationTimestamp="2025-11-24 17:23:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 17:24:21.763934896 +0000 UTC m=+1257.086816456" watchObservedRunningTime="2025-11-24 17:24:21.775454327 +0000 UTC m=+1257.098335877" Nov 24 17:24:22 crc kubenswrapper[4760]: I1124 17:24:22.207563 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh"] Nov 24 17:24:22 crc kubenswrapper[4760]: I1124 17:24:22.213598 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:24:22 crc kubenswrapper[4760]: I1124 17:24:22.750093 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" event={"ID":"bf199ff4-4624-4608-8b45-72a6f1437473","Type":"ContainerStarted","Data":"19acc214fa5fb2efd48f8af31042d9ba18afae406c589ea83b33f3abd4ce34f9"} Nov 24 17:24:30 crc kubenswrapper[4760]: I1124 17:24:30.701273 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:24:31 crc kubenswrapper[4760]: I1124 17:24:31.850873 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" event={"ID":"bf199ff4-4624-4608-8b45-72a6f1437473","Type":"ContainerStarted","Data":"b2e7225c805d7bd8564f13a14bd7ed7ac5a5d8cf884eeee72984ac9f7ad0d96f"} Nov 24 17:24:31 crc kubenswrapper[4760]: I1124 17:24:31.875134 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" podStartSLOduration=2.389514078 podStartE2EDuration="10.875115075s" podCreationTimestamp="2025-11-24 17:24:21 +0000 UTC" firstStartedPulling="2025-11-24 17:24:22.213335475 +0000 UTC m=+1257.536217025" lastFinishedPulling="2025-11-24 17:24:30.698936472 +0000 UTC 
m=+1266.021818022" observedRunningTime="2025-11-24 17:24:31.872533841 +0000 UTC m=+1267.195415411" watchObservedRunningTime="2025-11-24 17:24:31.875115075 +0000 UTC m=+1267.197996635" Nov 24 17:24:34 crc kubenswrapper[4760]: I1124 17:24:34.755341 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 24 17:24:35 crc kubenswrapper[4760]: I1124 17:24:35.829148 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 24 17:24:42 crc kubenswrapper[4760]: I1124 17:24:42.963329 4760 generic.go:334] "Generic (PLEG): container finished" podID="bf199ff4-4624-4608-8b45-72a6f1437473" containerID="b2e7225c805d7bd8564f13a14bd7ed7ac5a5d8cf884eeee72984ac9f7ad0d96f" exitCode=0 Nov 24 17:24:42 crc kubenswrapper[4760]: I1124 17:24:42.963441 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" event={"ID":"bf199ff4-4624-4608-8b45-72a6f1437473","Type":"ContainerDied","Data":"b2e7225c805d7bd8564f13a14bd7ed7ac5a5d8cf884eeee72984ac9f7ad0d96f"} Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.451582 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.509658 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-inventory\") pod \"bf199ff4-4624-4608-8b45-72a6f1437473\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.509714 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-ssh-key\") pod \"bf199ff4-4624-4608-8b45-72a6f1437473\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.509740 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-repo-setup-combined-ca-bundle\") pod \"bf199ff4-4624-4608-8b45-72a6f1437473\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.509786 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zc5jt\" (UniqueName: \"kubernetes.io/projected/bf199ff4-4624-4608-8b45-72a6f1437473-kube-api-access-zc5jt\") pod \"bf199ff4-4624-4608-8b45-72a6f1437473\" (UID: \"bf199ff4-4624-4608-8b45-72a6f1437473\") " Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.517238 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "bf199ff4-4624-4608-8b45-72a6f1437473" (UID: "bf199ff4-4624-4608-8b45-72a6f1437473"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.527208 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf199ff4-4624-4608-8b45-72a6f1437473-kube-api-access-zc5jt" (OuterVolumeSpecName: "kube-api-access-zc5jt") pod "bf199ff4-4624-4608-8b45-72a6f1437473" (UID: "bf199ff4-4624-4608-8b45-72a6f1437473"). InnerVolumeSpecName "kube-api-access-zc5jt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.540887 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-inventory" (OuterVolumeSpecName: "inventory") pod "bf199ff4-4624-4608-8b45-72a6f1437473" (UID: "bf199ff4-4624-4608-8b45-72a6f1437473"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.566494 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bf199ff4-4624-4608-8b45-72a6f1437473" (UID: "bf199ff4-4624-4608-8b45-72a6f1437473"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.612619 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zc5jt\" (UniqueName: \"kubernetes.io/projected/bf199ff4-4624-4608-8b45-72a6f1437473-kube-api-access-zc5jt\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.612657 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.612668 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.612679 4760 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf199ff4-4624-4608-8b45-72a6f1437473-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.989865 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" event={"ID":"bf199ff4-4624-4608-8b45-72a6f1437473","Type":"ContainerDied","Data":"19acc214fa5fb2efd48f8af31042d9ba18afae406c589ea83b33f3abd4ce34f9"} Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.989932 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19acc214fa5fb2efd48f8af31042d9ba18afae406c589ea83b33f3abd4ce34f9" Nov 24 17:24:44 crc kubenswrapper[4760]: I1124 17:24:44.990034 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.117929 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz"] Nov 24 17:24:45 crc kubenswrapper[4760]: E1124 17:24:45.118950 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf199ff4-4624-4608-8b45-72a6f1437473" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.119177 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf199ff4-4624-4608-8b45-72a6f1437473" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.119665 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf199ff4-4624-4608-8b45-72a6f1437473" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.120780 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.122290 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-7jrgz\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.122516 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-7jrgz\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.124491 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.124506 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.124643 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.125306 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.154572 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz"] Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.224805 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mg2n\" (UniqueName: \"kubernetes.io/projected/bcfd976b-0081-44f8-b0f4-2ca0e2372299-kube-api-access-4mg2n\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-7jrgz\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.224945 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-7jrgz\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.225165 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-7jrgz\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.229571 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-7jrgz\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.229988 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-7jrgz\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.327295 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mg2n\" (UniqueName: \"kubernetes.io/projected/bcfd976b-0081-44f8-b0f4-2ca0e2372299-kube-api-access-4mg2n\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-7jrgz\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.349619 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mg2n\" (UniqueName: \"kubernetes.io/projected/bcfd976b-0081-44f8-b0f4-2ca0e2372299-kube-api-access-4mg2n\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-7jrgz\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:45 crc kubenswrapper[4760]: I1124 17:24:45.449748 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:46 crc kubenswrapper[4760]: I1124 17:24:46.031889 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz"] Nov 24 17:24:47 crc kubenswrapper[4760]: I1124 17:24:47.014029 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" event={"ID":"bcfd976b-0081-44f8-b0f4-2ca0e2372299","Type":"ContainerStarted","Data":"fe2d90981e5c05e28a6c9dc23390b1993c790e982726def1ec5940f7eae63dbe"} Nov 24 17:24:47 crc kubenswrapper[4760]: I1124 17:24:47.014298 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" event={"ID":"bcfd976b-0081-44f8-b0f4-2ca0e2372299","Type":"ContainerStarted","Data":"b7541f4981322885d5c4b1a9b5702de1383fb231680fb550a09003cee43d4820"} Nov 24 17:24:47 crc kubenswrapper[4760]: I1124 17:24:47.054679 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" podStartSLOduration=1.647669283 podStartE2EDuration="2.054655535s" podCreationTimestamp="2025-11-24 17:24:45 +0000 UTC" firstStartedPulling="2025-11-24 17:24:46.039996845 +0000 UTC m=+1281.362878405" lastFinishedPulling="2025-11-24 17:24:46.446983097 +0000 UTC m=+1281.769864657" observedRunningTime="2025-11-24 17:24:47.038086859 +0000 UTC m=+1282.360968479" watchObservedRunningTime="2025-11-24 17:24:47.054655535 +0000 UTC m=+1282.377537115" Nov 24 17:24:50 crc kubenswrapper[4760]: I1124 17:24:50.047973 4760 generic.go:334] "Generic (PLEG): container finished" podID="bcfd976b-0081-44f8-b0f4-2ca0e2372299" containerID="fe2d90981e5c05e28a6c9dc23390b1993c790e982726def1ec5940f7eae63dbe" exitCode=0 Nov 24 17:24:50 crc kubenswrapper[4760]: I1124 17:24:50.048104 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" event={"ID":"bcfd976b-0081-44f8-b0f4-2ca0e2372299","Type":"ContainerDied","Data":"fe2d90981e5c05e28a6c9dc23390b1993c790e982726def1ec5940f7eae63dbe"} Nov 24 17:24:51 crc kubenswrapper[4760]: I1124 17:24:51.527362 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:51 crc kubenswrapper[4760]: I1124 17:24:51.658639 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mg2n\" (UniqueName: \"kubernetes.io/projected/bcfd976b-0081-44f8-b0f4-2ca0e2372299-kube-api-access-4mg2n\") pod \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " Nov 24 17:24:51 crc kubenswrapper[4760]: I1124 17:24:51.658718 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-inventory\") pod \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " Nov 24 17:24:51 crc kubenswrapper[4760]: I1124 17:24:51.658830 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-ssh-key\") pod \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\" (UID: \"bcfd976b-0081-44f8-b0f4-2ca0e2372299\") " Nov 24 17:24:51 crc kubenswrapper[4760]: I1124 17:24:51.668784 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcfd976b-0081-44f8-b0f4-2ca0e2372299-kube-api-access-4mg2n" (OuterVolumeSpecName: "kube-api-access-4mg2n") pod "bcfd976b-0081-44f8-b0f4-2ca0e2372299" (UID: "bcfd976b-0081-44f8-b0f4-2ca0e2372299"). InnerVolumeSpecName "kube-api-access-4mg2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:24:51 crc kubenswrapper[4760]: I1124 17:24:51.718735 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-inventory" (OuterVolumeSpecName: "inventory") pod "bcfd976b-0081-44f8-b0f4-2ca0e2372299" (UID: "bcfd976b-0081-44f8-b0f4-2ca0e2372299"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:24:51 crc kubenswrapper[4760]: I1124 17:24:51.722840 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bcfd976b-0081-44f8-b0f4-2ca0e2372299" (UID: "bcfd976b-0081-44f8-b0f4-2ca0e2372299"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:24:51 crc kubenswrapper[4760]: I1124 17:24:51.761605 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mg2n\" (UniqueName: \"kubernetes.io/projected/bcfd976b-0081-44f8-b0f4-2ca0e2372299-kube-api-access-4mg2n\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:51 crc kubenswrapper[4760]: I1124 17:24:51.761750 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:51 crc kubenswrapper[4760]: I1124 17:24:51.761834 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcfd976b-0081-44f8-b0f4-2ca0e2372299-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.084889 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" event={"ID":"bcfd976b-0081-44f8-b0f4-2ca0e2372299","Type":"ContainerDied","Data":"b7541f4981322885d5c4b1a9b5702de1383fb231680fb550a09003cee43d4820"} Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.084934 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7541f4981322885d5c4b1a9b5702de1383fb231680fb550a09003cee43d4820" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.084994 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-7jrgz" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.188997 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm"] Nov 24 17:24:52 crc kubenswrapper[4760]: E1124 17:24:52.189406 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcfd976b-0081-44f8-b0f4-2ca0e2372299" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.189424 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcfd976b-0081-44f8-b0f4-2ca0e2372299" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.189618 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcfd976b-0081-44f8-b0f4-2ca0e2372299" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.192327 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.194937 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.195368 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.196210 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.197730 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.200607 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm"] Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.271784 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn7nz\" (UniqueName: \"kubernetes.io/projected/b163f1e6-048b-4722-bb36-4cd23619b927-kube-api-access-sn7nz\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.271835 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.271865 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.271908 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.373960 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.374248 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn7nz\" (UniqueName: \"kubernetes.io/projected/b163f1e6-048b-4722-bb36-4cd23619b927-kube-api-access-sn7nz\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.374297 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.374330 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.378953 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.379957 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.380285 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.391719 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn7nz\" (UniqueName: \"kubernetes.io/projected/b163f1e6-048b-4722-bb36-4cd23619b927-kube-api-access-sn7nz\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:52 crc kubenswrapper[4760]: I1124 17:24:52.509370 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" Nov 24 17:24:53 crc kubenswrapper[4760]: I1124 17:24:53.778247 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm"] Nov 24 17:24:54 crc kubenswrapper[4760]: I1124 17:24:54.102821 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" event={"ID":"b163f1e6-048b-4722-bb36-4cd23619b927","Type":"ContainerStarted","Data":"54cc92e952a2f55a4aa7e34db7ecac2aee211f0465b1349f433071d2ed1f36df"} Nov 24 17:24:55 crc kubenswrapper[4760]: I1124 17:24:55.118897 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" event={"ID":"b163f1e6-048b-4722-bb36-4cd23619b927","Type":"ContainerStarted","Data":"b02186165ff4bdc9c382ddf98256cbaa745e9ac9316c7a933496e111a8604dd0"} Nov 24 17:24:55 crc kubenswrapper[4760]: I1124 17:24:55.152447 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" podStartSLOduration=2.3869384350000002 podStartE2EDuration="3.152416659s" podCreationTimestamp="2025-11-24 17:24:52 +0000 UTC" firstStartedPulling="2025-11-24 17:24:53.791493738 +0000 UTC m=+1289.114375298" lastFinishedPulling="2025-11-24 17:24:54.556971962 +0000 UTC m=+1289.879853522" observedRunningTime="2025-11-24 17:24:55.141421084 +0000 UTC m=+1290.464302674" watchObservedRunningTime="2025-11-24 17:24:55.152416659 +0000 UTC m=+1290.475298249" Nov 24 17:25:30 crc kubenswrapper[4760]: I1124 17:25:30.710768 4760 scope.go:117] "RemoveContainer" containerID="f8fa40b17e898905ec83c516573a21e4707a3508f1c55da17dccf91f0952b4d0" Nov 24 17:25:30 crc kubenswrapper[4760]: I1124 17:25:30.731735 4760 scope.go:117] "RemoveContainer" containerID="f8af48a875d829ae5d11e97bfe587d2c22287076bbb52fce432ceba139826cd5" Nov 24 17:25:30 crc kubenswrapper[4760]: I1124 17:25:30.754161 4760 scope.go:117] "RemoveContainer" containerID="72a1828d7eeb280cd09c64874cdbe04be5bc19f6b8bc85331bcdd4ed7c261b65" Nov 24 17:25:35 crc kubenswrapper[4760]: I1124 17:25:35.642464 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:25:35 crc kubenswrapper[4760]: I1124 17:25:35.643073 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:26:05 crc kubenswrapper[4760]: I1124 17:26:05.642437 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:26:05 crc kubenswrapper[4760]: I1124 17:26:05.642993 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:26:30 crc kubenswrapper[4760]: I1124 17:26:30.878101 4760 scope.go:117] "RemoveContainer" containerID="390e1a2dfd64e49424059bac17bb0c826f2e248e2b831d5dafeb3975570fffb2" Nov 24 17:26:30 crc kubenswrapper[4760]: I1124 17:26:30.930706 4760 scope.go:117] "RemoveContainer" containerID="d2fffbf61695d3305b4722e52a2e92317c60f9d892630df88bd086c8ac5be608" Nov 24 17:26:35 crc kubenswrapper[4760]: I1124 17:26:35.642649 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:26:35 crc kubenswrapper[4760]: I1124 17:26:35.643423 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:26:35 crc kubenswrapper[4760]: I1124 17:26:35.643484 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:26:35 crc kubenswrapper[4760]: I1124 17:26:35.644545 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"18e435d6f99820c29c6f9c48bc4be513e915a9521aaac2361f225513240d4796"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:26:35 crc kubenswrapper[4760]: I1124 17:26:35.644629 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://18e435d6f99820c29c6f9c48bc4be513e915a9521aaac2361f225513240d4796" gracePeriod=600 Nov 24 17:26:36 crc kubenswrapper[4760]: I1124 17:26:36.162944 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="18e435d6f99820c29c6f9c48bc4be513e915a9521aaac2361f225513240d4796" exitCode=0 Nov 24 17:26:36 crc kubenswrapper[4760]: I1124 17:26:36.163012 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"18e435d6f99820c29c6f9c48bc4be513e915a9521aaac2361f225513240d4796"} Nov 24 17:26:36 crc kubenswrapper[4760]: I1124 17:26:36.163777 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"} Nov 24 17:26:36 crc kubenswrapper[4760]: I1124 17:26:36.163820 4760 scope.go:117] "RemoveContainer" containerID="99978461206f93941fdae109daf9e539be0cb0e8e8e501e548ac8d9b42e27a5f" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.456566 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tphsm"] Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 
17:27:06.459665 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.469235 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tphsm"] Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.546949 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-catalog-content\") pod \"redhat-operators-tphsm\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.547054 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pm7z2\" (UniqueName: \"kubernetes.io/projected/d0305524-b41e-4bda-ba70-396ad039fef1-kube-api-access-pm7z2\") pod \"redhat-operators-tphsm\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.547163 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-utilities\") pod \"redhat-operators-tphsm\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.649097 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pm7z2\" (UniqueName: \"kubernetes.io/projected/d0305524-b41e-4bda-ba70-396ad039fef1-kube-api-access-pm7z2\") pod \"redhat-operators-tphsm\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.649220 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-utilities\") pod \"redhat-operators-tphsm\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.649361 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-catalog-content\") pod \"redhat-operators-tphsm\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.649876 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-catalog-content\") pod \"redhat-operators-tphsm\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.650045 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-utilities\") pod \"redhat-operators-tphsm\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.675509 4760 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pm7z2\" (UniqueName: \"kubernetes.io/projected/d0305524-b41e-4bda-ba70-396ad039fef1-kube-api-access-pm7z2\") pod \"redhat-operators-tphsm\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:06 crc kubenswrapper[4760]: I1124 17:27:06.810202 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:07 crc kubenswrapper[4760]: I1124 17:27:07.299236 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tphsm"] Nov 24 17:27:07 crc kubenswrapper[4760]: I1124 17:27:07.500453 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tphsm" event={"ID":"d0305524-b41e-4bda-ba70-396ad039fef1","Type":"ContainerStarted","Data":"218762d9e8fe0decb557e9c7078b6eeab0cf298965099e1c68e72c7061eafc49"} Nov 24 17:27:08 crc kubenswrapper[4760]: I1124 17:27:08.511599 4760 generic.go:334] "Generic (PLEG): container finished" podID="d0305524-b41e-4bda-ba70-396ad039fef1" containerID="58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2" exitCode=0 Nov 24 17:27:08 crc kubenswrapper[4760]: I1124 17:27:08.511699 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tphsm" event={"ID":"d0305524-b41e-4bda-ba70-396ad039fef1","Type":"ContainerDied","Data":"58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2"} Nov 24 17:27:09 crc kubenswrapper[4760]: I1124 17:27:09.522175 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tphsm" event={"ID":"d0305524-b41e-4bda-ba70-396ad039fef1","Type":"ContainerStarted","Data":"49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db"} Nov 24 17:27:10 crc kubenswrapper[4760]: I1124 17:27:10.533629 4760 generic.go:334] "Generic (PLEG): container finished" podID="d0305524-b41e-4bda-ba70-396ad039fef1" containerID="49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db" exitCode=0 Nov 24 17:27:10 crc kubenswrapper[4760]: I1124 17:27:10.533847 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tphsm" event={"ID":"d0305524-b41e-4bda-ba70-396ad039fef1","Type":"ContainerDied","Data":"49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db"} Nov 24 17:27:11 crc kubenswrapper[4760]: I1124 17:27:11.545665 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tphsm" event={"ID":"d0305524-b41e-4bda-ba70-396ad039fef1","Type":"ContainerStarted","Data":"eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc"} Nov 24 17:27:11 crc kubenswrapper[4760]: I1124 17:27:11.570947 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tphsm" podStartSLOduration=2.970956315 podStartE2EDuration="5.57092835s" podCreationTimestamp="2025-11-24 17:27:06 +0000 UTC" firstStartedPulling="2025-11-24 17:27:08.513678109 +0000 UTC m=+1423.836559669" lastFinishedPulling="2025-11-24 17:27:11.113650144 +0000 UTC m=+1426.436531704" observedRunningTime="2025-11-24 17:27:11.565769752 +0000 UTC m=+1426.888651322" watchObservedRunningTime="2025-11-24 17:27:11.57092835 +0000 UTC m=+1426.893809900" Nov 24 17:27:16 crc kubenswrapper[4760]: I1124 17:27:16.810594 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:16 crc kubenswrapper[4760]: I1124 17:27:16.812945 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:16 crc kubenswrapper[4760]: I1124 17:27:16.869632 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:17 crc kubenswrapper[4760]: I1124 17:27:17.651342 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:17 crc kubenswrapper[4760]: I1124 17:27:17.706668 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tphsm"] Nov 24 17:27:19 crc kubenswrapper[4760]: I1124 17:27:19.628903 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tphsm" podUID="d0305524-b41e-4bda-ba70-396ad039fef1" containerName="registry-server" containerID="cri-o://eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc" gracePeriod=2 Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.067273 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.225645 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-utilities\") pod \"d0305524-b41e-4bda-ba70-396ad039fef1\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.225755 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pm7z2\" (UniqueName: \"kubernetes.io/projected/d0305524-b41e-4bda-ba70-396ad039fef1-kube-api-access-pm7z2\") pod \"d0305524-b41e-4bda-ba70-396ad039fef1\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.225844 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-catalog-content\") pod \"d0305524-b41e-4bda-ba70-396ad039fef1\" (UID: \"d0305524-b41e-4bda-ba70-396ad039fef1\") " Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.228849 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-utilities" (OuterVolumeSpecName: "utilities") pod "d0305524-b41e-4bda-ba70-396ad039fef1" (UID: "d0305524-b41e-4bda-ba70-396ad039fef1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.232863 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0305524-b41e-4bda-ba70-396ad039fef1-kube-api-access-pm7z2" (OuterVolumeSpecName: "kube-api-access-pm7z2") pod "d0305524-b41e-4bda-ba70-396ad039fef1" (UID: "d0305524-b41e-4bda-ba70-396ad039fef1"). InnerVolumeSpecName "kube-api-access-pm7z2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.311858 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d0305524-b41e-4bda-ba70-396ad039fef1" (UID: "d0305524-b41e-4bda-ba70-396ad039fef1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.328820 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.328849 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0305524-b41e-4bda-ba70-396ad039fef1-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.328861 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pm7z2\" (UniqueName: \"kubernetes.io/projected/d0305524-b41e-4bda-ba70-396ad039fef1-kube-api-access-pm7z2\") on node \"crc\" DevicePath \"\"" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.644643 4760 generic.go:334] "Generic (PLEG): container finished" podID="d0305524-b41e-4bda-ba70-396ad039fef1" containerID="eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc" exitCode=0 Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.644718 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tphsm" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.644774 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tphsm" event={"ID":"d0305524-b41e-4bda-ba70-396ad039fef1","Type":"ContainerDied","Data":"eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc"} Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.646126 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tphsm" event={"ID":"d0305524-b41e-4bda-ba70-396ad039fef1","Type":"ContainerDied","Data":"218762d9e8fe0decb557e9c7078b6eeab0cf298965099e1c68e72c7061eafc49"} Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.646160 4760 scope.go:117] "RemoveContainer" containerID="eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.676050 4760 scope.go:117] "RemoveContainer" containerID="49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.701417 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tphsm"] Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.714789 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tphsm"] Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.730933 4760 scope.go:117] "RemoveContainer" containerID="58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.774234 4760 scope.go:117] "RemoveContainer" containerID="eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc" Nov 24 17:27:20 crc kubenswrapper[4760]: E1124 17:27:20.774691 4760 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc\": container with ID starting with eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc not found: ID does not exist" containerID="eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.774737 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc"} err="failed to get container status \"eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc\": rpc error: code = NotFound desc = could not find container \"eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc\": container with ID starting with eb6d676832cf7a18dcc82c93b76793fb38384a7b45fd55b06813173a3049efcc not found: ID does not exist" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.774769 4760 scope.go:117] "RemoveContainer" containerID="49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db" Nov 24 17:27:20 crc kubenswrapper[4760]: E1124 17:27:20.775153 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db\": container with ID starting with 49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db not found: ID does not exist" containerID="49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.775184 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db"} err="failed to get container status \"49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db\": rpc error: code = NotFound desc = could not find container \"49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db\": container with ID starting with 49bc673bef2e1b8fbcb9823d5230799a5d22316cf4ae3ea254901193ee0e92db not found: ID does not exist" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.775210 4760 scope.go:117] "RemoveContainer" containerID="58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2" Nov 24 17:27:20 crc kubenswrapper[4760]: E1124 17:27:20.775477 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2\": container with ID starting with 58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2 not found: ID does not exist" containerID="58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2" Nov 24 17:27:20 crc kubenswrapper[4760]: I1124 17:27:20.775508 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2"} err="failed to get container status \"58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2\": rpc error: code = NotFound desc = could not find container \"58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2\": container with ID starting with 58358c2a13c0eac0bb3e06e0362adc3c75d5fba1d131a8c548c0dd87ae7058b2 not found: ID does not exist" Nov 24 17:27:21 crc kubenswrapper[4760]: I1124 17:27:21.477699 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="d0305524-b41e-4bda-ba70-396ad039fef1" path="/var/lib/kubelet/pods/d0305524-b41e-4bda-ba70-396ad039fef1/volumes" Nov 24 17:27:31 crc kubenswrapper[4760]: I1124 17:27:31.019232 4760 scope.go:117] "RemoveContainer" containerID="5c8b31f23dcf809ae5cf99f6650d661c80ce89f0d065220e8d970e2e3ad1edb6" Nov 24 17:27:31 crc kubenswrapper[4760]: I1124 17:27:31.044585 4760 scope.go:117] "RemoveContainer" containerID="7a8ef8a380b7e636ab00608764debb57fbc796e051d126785667d33c452e65f5" Nov 24 17:27:31 crc kubenswrapper[4760]: I1124 17:27:31.065793 4760 scope.go:117] "RemoveContainer" containerID="b6378aeec00227a8350f88f40782b4a4c74ca74596cd4c08ab17a27f891a08e9" Nov 24 17:27:31 crc kubenswrapper[4760]: I1124 17:27:31.084996 4760 scope.go:117] "RemoveContainer" containerID="7e6f564b7b0d257978f915f514f6e8bd0dcfd51e7f699a3661ffda19e9b6c2c9" Nov 24 17:27:31 crc kubenswrapper[4760]: I1124 17:27:31.101512 4760 scope.go:117] "RemoveContainer" containerID="431a955c05fd285060fbe817b155ffb4b9010e21a78b7521af4efab27116c242" Nov 24 17:27:31 crc kubenswrapper[4760]: I1124 17:27:31.120733 4760 scope.go:117] "RemoveContainer" containerID="4ceadab664fbbdc018837f0ec1387ba50226496ced2a45762f73314add03ee9a" Nov 24 17:27:31 crc kubenswrapper[4760]: I1124 17:27:31.149460 4760 scope.go:117] "RemoveContainer" containerID="166f43c879b19974c5856a4fe763fca6c270f7017cb9ba551badf7c19127071d" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.411141 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dd9tw"] Nov 24 17:27:39 crc kubenswrapper[4760]: E1124 17:27:39.412329 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0305524-b41e-4bda-ba70-396ad039fef1" containerName="extract-content" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.412351 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0305524-b41e-4bda-ba70-396ad039fef1" containerName="extract-content" Nov 24 17:27:39 crc kubenswrapper[4760]: E1124 17:27:39.412402 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0305524-b41e-4bda-ba70-396ad039fef1" containerName="extract-utilities" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.412415 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0305524-b41e-4bda-ba70-396ad039fef1" containerName="extract-utilities" Nov 24 17:27:39 crc kubenswrapper[4760]: E1124 17:27:39.412438 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0305524-b41e-4bda-ba70-396ad039fef1" containerName="registry-server" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.412450 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0305524-b41e-4bda-ba70-396ad039fef1" containerName="registry-server" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.412718 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0305524-b41e-4bda-ba70-396ad039fef1" containerName="registry-server" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.417983 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.420722 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dd9tw"] Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.530882 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jqgh\" (UniqueName: \"kubernetes.io/projected/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-kube-api-access-4jqgh\") pod \"redhat-marketplace-dd9tw\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.531345 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-catalog-content\") pod \"redhat-marketplace-dd9tw\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.531735 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-utilities\") pod \"redhat-marketplace-dd9tw\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.633786 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jqgh\" (UniqueName: \"kubernetes.io/projected/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-kube-api-access-4jqgh\") pod \"redhat-marketplace-dd9tw\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.633856 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-catalog-content\") pod \"redhat-marketplace-dd9tw\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.634377 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-catalog-content\") pod \"redhat-marketplace-dd9tw\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.634513 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-utilities\") pod \"redhat-marketplace-dd9tw\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.634790 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-utilities\") pod \"redhat-marketplace-dd9tw\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.651958 4760 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4jqgh\" (UniqueName: \"kubernetes.io/projected/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-kube-api-access-4jqgh\") pod \"redhat-marketplace-dd9tw\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:39 crc kubenswrapper[4760]: I1124 17:27:39.742890 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:40 crc kubenswrapper[4760]: I1124 17:27:40.271947 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dd9tw"] Nov 24 17:27:40 crc kubenswrapper[4760]: I1124 17:27:40.877858 4760 generic.go:334] "Generic (PLEG): container finished" podID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerID="380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9" exitCode=0 Nov 24 17:27:40 crc kubenswrapper[4760]: I1124 17:27:40.877913 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dd9tw" event={"ID":"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f","Type":"ContainerDied","Data":"380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9"} Nov 24 17:27:40 crc kubenswrapper[4760]: I1124 17:27:40.877969 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dd9tw" event={"ID":"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f","Type":"ContainerStarted","Data":"963c192c07a829ab40a1db0f9603710bbe59cb5c23ed8948dde21a9c60f62d65"} Nov 24 17:27:45 crc kubenswrapper[4760]: I1124 17:27:45.921713 4760 generic.go:334] "Generic (PLEG): container finished" podID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerID="90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0" exitCode=0 Nov 24 17:27:45 crc kubenswrapper[4760]: I1124 17:27:45.921811 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dd9tw" event={"ID":"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f","Type":"ContainerDied","Data":"90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0"} Nov 24 17:27:48 crc kubenswrapper[4760]: I1124 17:27:48.949275 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dd9tw" event={"ID":"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f","Type":"ContainerStarted","Data":"422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c"} Nov 24 17:27:48 crc kubenswrapper[4760]: I1124 17:27:48.973883 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dd9tw" podStartSLOduration=2.853802206 podStartE2EDuration="9.973864434s" podCreationTimestamp="2025-11-24 17:27:39 +0000 UTC" firstStartedPulling="2025-11-24 17:27:40.881756853 +0000 UTC m=+1456.204638403" lastFinishedPulling="2025-11-24 17:27:48.001819061 +0000 UTC m=+1463.324700631" observedRunningTime="2025-11-24 17:27:48.964843975 +0000 UTC m=+1464.287725525" watchObservedRunningTime="2025-11-24 17:27:48.973864434 +0000 UTC m=+1464.296745984" Nov 24 17:27:49 crc kubenswrapper[4760]: I1124 17:27:49.743443 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:49 crc kubenswrapper[4760]: I1124 17:27:49.743771 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:49 crc kubenswrapper[4760]: I1124 17:27:49.797597 4760 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:59 crc kubenswrapper[4760]: I1124 17:27:59.792330 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:27:59 crc kubenswrapper[4760]: I1124 17:27:59.838698 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dd9tw"] Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.049034 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dd9tw" podUID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerName="registry-server" containerID="cri-o://422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c" gracePeriod=2 Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.522358 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.571225 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-catalog-content\") pod \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.571331 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-utilities\") pod \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.571568 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jqgh\" (UniqueName: \"kubernetes.io/projected/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-kube-api-access-4jqgh\") pod \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\" (UID: \"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f\") " Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.572288 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-utilities" (OuterVolumeSpecName: "utilities") pod "0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" (UID: "0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.577603 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-kube-api-access-4jqgh" (OuterVolumeSpecName: "kube-api-access-4jqgh") pod "0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" (UID: "0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f"). InnerVolumeSpecName "kube-api-access-4jqgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.590530 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" (UID: "0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.674197 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jqgh\" (UniqueName: \"kubernetes.io/projected/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-kube-api-access-4jqgh\") on node \"crc\" DevicePath \"\"" Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.674246 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:28:00 crc kubenswrapper[4760]: I1124 17:28:00.674258 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.059305 4760 generic.go:334] "Generic (PLEG): container finished" podID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerID="422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c" exitCode=0 Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.059353 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dd9tw" event={"ID":"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f","Type":"ContainerDied","Data":"422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c"} Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.059380 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dd9tw" event={"ID":"0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f","Type":"ContainerDied","Data":"963c192c07a829ab40a1db0f9603710bbe59cb5c23ed8948dde21a9c60f62d65"} Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.059380 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dd9tw" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.059395 4760 scope.go:117] "RemoveContainer" containerID="422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.089572 4760 scope.go:117] "RemoveContainer" containerID="90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.093752 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dd9tw"] Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.102822 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dd9tw"] Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.124205 4760 scope.go:117] "RemoveContainer" containerID="380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.149168 4760 scope.go:117] "RemoveContainer" containerID="422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c" Nov 24 17:28:01 crc kubenswrapper[4760]: E1124 17:28:01.149576 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c\": container with ID starting with 422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c not found: ID does not exist" containerID="422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.149614 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c"} err="failed to get container status \"422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c\": rpc error: code = NotFound desc = could not find container \"422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c\": container with ID starting with 422c211f32d1fd29b8afce7d3d5a2f7ab1e273fc10289c5a65d1f1b985e4101c not found: ID does not exist" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.149644 4760 scope.go:117] "RemoveContainer" containerID="90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0" Nov 24 17:28:01 crc kubenswrapper[4760]: E1124 17:28:01.150002 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0\": container with ID starting with 90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0 not found: ID does not exist" containerID="90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.150090 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0"} err="failed to get container status \"90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0\": rpc error: code = NotFound desc = could not find container \"90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0\": container with ID starting with 90c0d049435fb998f539cdc0d29b2d8160ef6b2590930305a457166057fef8a0 not found: ID does not exist" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.150107 4760 scope.go:117] "RemoveContainer" 
containerID="380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9" Nov 24 17:28:01 crc kubenswrapper[4760]: E1124 17:28:01.150557 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9\": container with ID starting with 380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9 not found: ID does not exist" containerID="380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.150580 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9"} err="failed to get container status \"380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9\": rpc error: code = NotFound desc = could not find container \"380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9\": container with ID starting with 380567e9ec56498bc4ca7a741b8e85c5de3a4a6e510bf21b335518ec790928f9 not found: ID does not exist" Nov 24 17:28:01 crc kubenswrapper[4760]: I1124 17:28:01.478133 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" path="/var/lib/kubelet/pods/0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f/volumes" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.384051 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fv2rb"] Nov 24 17:28:07 crc kubenswrapper[4760]: E1124 17:28:07.384910 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerName="extract-utilities" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.384925 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerName="extract-utilities" Nov 24 17:28:07 crc kubenswrapper[4760]: E1124 17:28:07.384935 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerName="registry-server" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.384941 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerName="registry-server" Nov 24 17:28:07 crc kubenswrapper[4760]: E1124 17:28:07.384962 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerName="extract-content" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.384968 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerName="extract-content" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.385202 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d9929a3-872f-4476-a8ba-e6bd8ba3ff7f" containerName="registry-server" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.386679 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.397143 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fv2rb"] Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.588502 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-catalog-content\") pod \"community-operators-fv2rb\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.588831 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9bnn\" (UniqueName: \"kubernetes.io/projected/a35e41ef-29dd-4a8b-8011-bc813fd565f6-kube-api-access-m9bnn\") pod \"community-operators-fv2rb\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.589101 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-utilities\") pod \"community-operators-fv2rb\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.690514 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-utilities\") pod \"community-operators-fv2rb\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.690650 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-catalog-content\") pod \"community-operators-fv2rb\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.690686 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9bnn\" (UniqueName: \"kubernetes.io/projected/a35e41ef-29dd-4a8b-8011-bc813fd565f6-kube-api-access-m9bnn\") pod \"community-operators-fv2rb\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.691063 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-utilities\") pod \"community-operators-fv2rb\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.691316 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-catalog-content\") pod \"community-operators-fv2rb\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:07 crc kubenswrapper[4760]: I1124 17:28:07.719976 4760 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-m9bnn\" (UniqueName: \"kubernetes.io/projected/a35e41ef-29dd-4a8b-8011-bc813fd565f6-kube-api-access-m9bnn\") pod \"community-operators-fv2rb\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:08 crc kubenswrapper[4760]: I1124 17:28:08.010250 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:08 crc kubenswrapper[4760]: I1124 17:28:08.492953 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fv2rb"] Nov 24 17:28:09 crc kubenswrapper[4760]: I1124 17:28:09.161024 4760 generic.go:334] "Generic (PLEG): container finished" podID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerID="321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0" exitCode=0 Nov 24 17:28:09 crc kubenswrapper[4760]: I1124 17:28:09.161119 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fv2rb" event={"ID":"a35e41ef-29dd-4a8b-8011-bc813fd565f6","Type":"ContainerDied","Data":"321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0"} Nov 24 17:28:09 crc kubenswrapper[4760]: I1124 17:28:09.161358 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fv2rb" event={"ID":"a35e41ef-29dd-4a8b-8011-bc813fd565f6","Type":"ContainerStarted","Data":"8c1dea688be5de39b3ae5b93b611e8a1fed40a9713944453beea1fd6b44cd219"} Nov 24 17:28:10 crc kubenswrapper[4760]: I1124 17:28:10.170919 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fv2rb" event={"ID":"a35e41ef-29dd-4a8b-8011-bc813fd565f6","Type":"ContainerStarted","Data":"be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80"} Nov 24 17:28:11 crc kubenswrapper[4760]: I1124 17:28:11.183811 4760 generic.go:334] "Generic (PLEG): container finished" podID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerID="be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80" exitCode=0 Nov 24 17:28:11 crc kubenswrapper[4760]: I1124 17:28:11.183863 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fv2rb" event={"ID":"a35e41ef-29dd-4a8b-8011-bc813fd565f6","Type":"ContainerDied","Data":"be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80"} Nov 24 17:28:12 crc kubenswrapper[4760]: I1124 17:28:12.193245 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fv2rb" event={"ID":"a35e41ef-29dd-4a8b-8011-bc813fd565f6","Type":"ContainerStarted","Data":"2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2"} Nov 24 17:28:12 crc kubenswrapper[4760]: I1124 17:28:12.227081 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fv2rb" podStartSLOduration=2.8129872110000003 podStartE2EDuration="5.227062976s" podCreationTimestamp="2025-11-24 17:28:07 +0000 UTC" firstStartedPulling="2025-11-24 17:28:09.16303573 +0000 UTC m=+1484.485917280" lastFinishedPulling="2025-11-24 17:28:11.577111495 +0000 UTC m=+1486.899993045" observedRunningTime="2025-11-24 17:28:12.218815609 +0000 UTC m=+1487.541697159" watchObservedRunningTime="2025-11-24 17:28:12.227062976 +0000 UTC m=+1487.549944536" Nov 24 17:28:18 crc kubenswrapper[4760]: I1124 17:28:18.011172 4760 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:18 crc kubenswrapper[4760]: I1124 17:28:18.011941 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:18 crc kubenswrapper[4760]: I1124 17:28:18.102589 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:18 crc kubenswrapper[4760]: I1124 17:28:18.294586 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:18 crc kubenswrapper[4760]: I1124 17:28:18.360148 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fv2rb"] Nov 24 17:28:20 crc kubenswrapper[4760]: I1124 17:28:20.264713 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fv2rb" podUID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerName="registry-server" containerID="cri-o://2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2" gracePeriod=2 Nov 24 17:28:20 crc kubenswrapper[4760]: I1124 17:28:20.697039 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:20 crc kubenswrapper[4760]: I1124 17:28:20.821818 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9bnn\" (UniqueName: \"kubernetes.io/projected/a35e41ef-29dd-4a8b-8011-bc813fd565f6-kube-api-access-m9bnn\") pod \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " Nov 24 17:28:20 crc kubenswrapper[4760]: I1124 17:28:20.821911 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-utilities\") pod \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " Nov 24 17:28:20 crc kubenswrapper[4760]: I1124 17:28:20.822074 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-catalog-content\") pod \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\" (UID: \"a35e41ef-29dd-4a8b-8011-bc813fd565f6\") " Nov 24 17:28:20 crc kubenswrapper[4760]: I1124 17:28:20.822857 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-utilities" (OuterVolumeSpecName: "utilities") pod "a35e41ef-29dd-4a8b-8011-bc813fd565f6" (UID: "a35e41ef-29dd-4a8b-8011-bc813fd565f6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:28:20 crc kubenswrapper[4760]: I1124 17:28:20.827054 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a35e41ef-29dd-4a8b-8011-bc813fd565f6-kube-api-access-m9bnn" (OuterVolumeSpecName: "kube-api-access-m9bnn") pod "a35e41ef-29dd-4a8b-8011-bc813fd565f6" (UID: "a35e41ef-29dd-4a8b-8011-bc813fd565f6"). InnerVolumeSpecName "kube-api-access-m9bnn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:28:20 crc kubenswrapper[4760]: I1124 17:28:20.924751 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9bnn\" (UniqueName: \"kubernetes.io/projected/a35e41ef-29dd-4a8b-8011-bc813fd565f6-kube-api-access-m9bnn\") on node \"crc\" DevicePath \"\"" Nov 24 17:28:20 crc kubenswrapper[4760]: I1124 17:28:20.924783 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.276185 4760 generic.go:334] "Generic (PLEG): container finished" podID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerID="2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2" exitCode=0 Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.276237 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fv2rb" event={"ID":"a35e41ef-29dd-4a8b-8011-bc813fd565f6","Type":"ContainerDied","Data":"2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2"} Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.276305 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fv2rb" event={"ID":"a35e41ef-29dd-4a8b-8011-bc813fd565f6","Type":"ContainerDied","Data":"8c1dea688be5de39b3ae5b93b611e8a1fed40a9713944453beea1fd6b44cd219"} Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.276330 4760 scope.go:117] "RemoveContainer" containerID="2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2" Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.276261 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fv2rb" Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.303559 4760 scope.go:117] "RemoveContainer" containerID="be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80" Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.339716 4760 scope.go:117] "RemoveContainer" containerID="321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0" Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.390053 4760 scope.go:117] "RemoveContainer" containerID="2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2" Nov 24 17:28:21 crc kubenswrapper[4760]: E1124 17:28:21.390493 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2\": container with ID starting with 2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2 not found: ID does not exist" containerID="2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2" Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.390530 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2"} err="failed to get container status \"2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2\": rpc error: code = NotFound desc = could not find container \"2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2\": container with ID starting with 2663d39f51c0969d3e49ca316dd27a8c43353e69b8f1c0be7b6d71ba19f342a2 not found: ID does not exist" Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.390562 4760 scope.go:117] "RemoveContainer" containerID="be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80" Nov 24 17:28:21 crc kubenswrapper[4760]: E1124 17:28:21.390950 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80\": container with ID starting with be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80 not found: ID does not exist" containerID="be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80" Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.390986 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80"} err="failed to get container status \"be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80\": rpc error: code = NotFound desc = could not find container \"be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80\": container with ID starting with be4f44eb6d63a1fe49404d5f20c7be8a0a605235b858ee0c4dd04fc2fb2f9d80 not found: ID does not exist" Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.391013 4760 scope.go:117] "RemoveContainer" containerID="321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0" Nov 24 17:28:21 crc kubenswrapper[4760]: E1124 17:28:21.391258 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0\": container with ID starting with 321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0 not found: ID does not exist" containerID="321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0" 
Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.391278 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0"} err="failed to get container status \"321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0\": rpc error: code = NotFound desc = could not find container \"321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0\": container with ID starting with 321012c1fc59f2db87ae034ca461da1565320c95122d8f55118669578a9a04b0 not found: ID does not exist"
Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.553605 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a35e41ef-29dd-4a8b-8011-bc813fd565f6" (UID: "a35e41ef-29dd-4a8b-8011-bc813fd565f6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.620671 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fv2rb"]
Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.628122 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fv2rb"]
Nov 24 17:28:21 crc kubenswrapper[4760]: I1124 17:28:21.638955 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a35e41ef-29dd-4a8b-8011-bc813fd565f6-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 17:28:22 crc kubenswrapper[4760]: I1124 17:28:22.285622 4760 generic.go:334] "Generic (PLEG): container finished" podID="b163f1e6-048b-4722-bb36-4cd23619b927" containerID="b02186165ff4bdc9c382ddf98256cbaa745e9ac9316c7a933496e111a8604dd0" exitCode=0
Nov 24 17:28:22 crc kubenswrapper[4760]: I1124 17:28:22.285715 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" event={"ID":"b163f1e6-048b-4722-bb36-4cd23619b927","Type":"ContainerDied","Data":"b02186165ff4bdc9c382ddf98256cbaa745e9ac9316c7a933496e111a8604dd0"}
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.484594 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" path="/var/lib/kubelet/pods/a35e41ef-29dd-4a8b-8011-bc813fd565f6/volumes"
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.734048 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm"
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.881285 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-bootstrap-combined-ca-bundle\") pod \"b163f1e6-048b-4722-bb36-4cd23619b927\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") "
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.881364 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sn7nz\" (UniqueName: \"kubernetes.io/projected/b163f1e6-048b-4722-bb36-4cd23619b927-kube-api-access-sn7nz\") pod \"b163f1e6-048b-4722-bb36-4cd23619b927\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") "
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.881438 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-ssh-key\") pod \"b163f1e6-048b-4722-bb36-4cd23619b927\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") "
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.881514 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-inventory\") pod \"b163f1e6-048b-4722-bb36-4cd23619b927\" (UID: \"b163f1e6-048b-4722-bb36-4cd23619b927\") "
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.886768 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "b163f1e6-048b-4722-bb36-4cd23619b927" (UID: "b163f1e6-048b-4722-bb36-4cd23619b927"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.887243 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b163f1e6-048b-4722-bb36-4cd23619b927-kube-api-access-sn7nz" (OuterVolumeSpecName: "kube-api-access-sn7nz") pod "b163f1e6-048b-4722-bb36-4cd23619b927" (UID: "b163f1e6-048b-4722-bb36-4cd23619b927"). InnerVolumeSpecName "kube-api-access-sn7nz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.931834 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b163f1e6-048b-4722-bb36-4cd23619b927" (UID: "b163f1e6-048b-4722-bb36-4cd23619b927"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.934513 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-inventory" (OuterVolumeSpecName: "inventory") pod "b163f1e6-048b-4722-bb36-4cd23619b927" (UID: "b163f1e6-048b-4722-bb36-4cd23619b927"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.984512 4760 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.984605 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sn7nz\" (UniqueName: \"kubernetes.io/projected/b163f1e6-048b-4722-bb36-4cd23619b927-kube-api-access-sn7nz\") on node \"crc\" DevicePath \"\""
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.984620 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 17:28:23 crc kubenswrapper[4760]: I1124 17:28:23.984634 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b163f1e6-048b-4722-bb36-4cd23619b927-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.315160 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm" event={"ID":"b163f1e6-048b-4722-bb36-4cd23619b927","Type":"ContainerDied","Data":"54cc92e952a2f55a4aa7e34db7ecac2aee211f0465b1349f433071d2ed1f36df"}
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.315541 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54cc92e952a2f55a4aa7e34db7ecac2aee211f0465b1349f433071d2ed1f36df"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.315240 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.420744 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"]
Nov 24 17:28:24 crc kubenswrapper[4760]: E1124 17:28:24.421349 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerName="extract-utilities"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.421375 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerName="extract-utilities"
Nov 24 17:28:24 crc kubenswrapper[4760]: E1124 17:28:24.421399 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerName="registry-server"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.421408 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerName="registry-server"
Nov 24 17:28:24 crc kubenswrapper[4760]: E1124 17:28:24.421431 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b163f1e6-048b-4722-bb36-4cd23619b927" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.421440 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="b163f1e6-048b-4722-bb36-4cd23619b927" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 24 17:28:24 crc kubenswrapper[4760]: E1124 17:28:24.421471 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerName="extract-content"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.421481 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerName="extract-content"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.421711 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="b163f1e6-048b-4722-bb36-4cd23619b927" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.421754 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a35e41ef-29dd-4a8b-8011-bc813fd565f6" containerName="registry-server"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.422649 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.425262 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.425786 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.426489 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.427660 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.438887 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"]
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.595466 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.595529 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7k8c\" (UniqueName: \"kubernetes.io/projected/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-kube-api-access-m7k8c\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.595616 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.699490 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.699770 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.699830 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7k8c\" (UniqueName: \"kubernetes.io/projected/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-kube-api-access-m7k8c\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.706149 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.713703 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.729655 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7k8c\" (UniqueName: \"kubernetes.io/projected/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-kube-api-access-m7k8c\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:24 crc kubenswrapper[4760]: I1124 17:28:24.757943 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:28:25 crc kubenswrapper[4760]: I1124 17:28:25.382749 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"]
Nov 24 17:28:25 crc kubenswrapper[4760]: I1124 17:28:25.877870 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 17:28:26 crc kubenswrapper[4760]: I1124 17:28:26.339567 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr" event={"ID":"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e","Type":"ContainerStarted","Data":"8d265dfcfc9665debc1a70bc4327471936ea92446e2f05c36599dbc3cdd115a7"}
Nov 24 17:28:26 crc kubenswrapper[4760]: I1124 17:28:26.339987 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr" event={"ID":"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e","Type":"ContainerStarted","Data":"5ced9760b0e87ab39d7e7fd634932039ac59c044af91343c1168d2bda8f7de77"}
Nov 24 17:28:26 crc kubenswrapper[4760]: I1124 17:28:26.363799 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr" podStartSLOduration=1.883387881 podStartE2EDuration="2.363776572s" podCreationTimestamp="2025-11-24 17:28:24 +0000 UTC" firstStartedPulling="2025-11-24 17:28:25.394405673 +0000 UTC m=+1500.717287233" lastFinishedPulling="2025-11-24 17:28:25.874794374 +0000 UTC m=+1501.197675924" observedRunningTime="2025-11-24 17:28:26.358490011 +0000 UTC m=+1501.681371581" watchObservedRunningTime="2025-11-24 17:28:26.363776572 +0000 UTC m=+1501.686658132"
Nov 24 17:28:31 crc kubenswrapper[4760]: I1124 17:28:31.269521 4760 scope.go:117] "RemoveContainer" containerID="e0846ff9301355e0564be5cb862a556e20c22383598b1d7463473062e200cf42"
Nov 24 17:28:31 crc kubenswrapper[4760]: I1124 17:28:31.301696 4760 scope.go:117] "RemoveContainer" containerID="c4168340d9217021e68a6868dfc0363a2bc35187b2704860d31f25ab6293d563"
Nov 24 17:28:31 crc kubenswrapper[4760]: I1124 17:28:31.335806 4760 scope.go:117] "RemoveContainer" containerID="1bccadad484fe72ceed7d6cb49d49cd8b3761d9b7328f836a8bbe9e0bb89d306"
Nov 24 17:28:31 crc kubenswrapper[4760]: I1124 17:28:31.358273 4760 scope.go:117] "RemoveContainer" containerID="88993599c4082243864390714cd9c961b2aa31184a1c1fbb31bedb247f4a2179"
Nov 24 17:28:35 crc kubenswrapper[4760]: I1124 17:28:35.643326 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 17:28:35 crc kubenswrapper[4760]: I1124 17:28:35.643948 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.052102 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-3d80-account-create-s4vnd"]
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.065314 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-gvrlj"]
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.075263 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-5ktbp"]
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.084149 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-3d80-account-create-s4vnd"]
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.091817 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-gvrlj"]
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.098914 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-5ktbp"]
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.498273 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f38e58f-36e7-44fd-8df7-c4443e47d534" path="/var/lib/kubelet/pods/2f38e58f-36e7-44fd-8df7-c4443e47d534/volumes"
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.500587 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59f377ad-49a5-4ead-a3ab-10a3796a1cf5" path="/var/lib/kubelet/pods/59f377ad-49a5-4ead-a3ab-10a3796a1cf5/volumes"
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.502134 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9460517b-9491-47de-bf02-3809732142c9" path="/var/lib/kubelet/pods/9460517b-9491-47de-bf02-3809732142c9/volumes"
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.642665 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 17:29:05 crc kubenswrapper[4760]: I1124 17:29:05.642765 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 17:29:07 crc kubenswrapper[4760]: I1124 17:29:07.030229 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-9575-account-create-8zpxz"]
Nov 24 17:29:07 crc kubenswrapper[4760]: I1124 17:29:07.039441 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-9575-account-create-8zpxz"]
Nov 24 17:29:07 crc kubenswrapper[4760]: I1124 17:29:07.476709 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97150276-8639-4c2e-9324-9c3c840f58ec" path="/var/lib/kubelet/pods/97150276-8639-4c2e-9324-9c3c840f58ec/volumes"
Nov 24 17:29:14 crc kubenswrapper[4760]: I1124 17:29:14.053507 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-74m25"]
Nov 24 17:29:14 crc kubenswrapper[4760]: I1124 17:29:14.067540 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-0ac2-account-create-s59pm"]
Nov 24 17:29:14 crc kubenswrapper[4760]: I1124 17:29:14.077345 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-0ac2-account-create-s59pm"]
Nov 24 17:29:14 crc kubenswrapper[4760]: I1124 17:29:14.088254 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-74m25"]
Nov 24 17:29:15 crc kubenswrapper[4760]: I1124 17:29:15.486741 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5659f320-0add-4e47-a4d9-b0aedac33d73" path="/var/lib/kubelet/pods/5659f320-0add-4e47-a4d9-b0aedac33d73/volumes"
Nov 24 17:29:15 crc kubenswrapper[4760]: I1124 17:29:15.490921 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925c6a06-e39d-42ff-abc8-a318552833b1" path="/var/lib/kubelet/pods/925c6a06-e39d-42ff-abc8-a318552833b1/volumes"
Nov 24 17:29:31 crc kubenswrapper[4760]: I1124 17:29:31.488023 4760 scope.go:117] "RemoveContainer" containerID="db48b3903be23294c4e730f020ec3006d5ea827baca81e7a0abedac397c1b956"
Nov 24 17:29:31 crc kubenswrapper[4760]: I1124 17:29:31.530065 4760 scope.go:117] "RemoveContainer" containerID="91ec248ba846facb12ac6c07dc8ebe62f07b869d339fd4cba3b73f87e0c02630"
Nov 24 17:29:31 crc kubenswrapper[4760]: I1124 17:29:31.586563 4760 scope.go:117] "RemoveContainer" containerID="5b25f3663d2b7cb6cfde38ab289c2a57aeb55d4e264e7c88b7684feb0e46591b"
Nov 24 17:29:31 crc kubenswrapper[4760]: I1124 17:29:31.634454 4760 scope.go:117] "RemoveContainer" containerID="fc3d6defba9a4c29f557db5c8f8eb47a067b2f3bb6017c9608dd683671424f9c"
Nov 24 17:29:31 crc kubenswrapper[4760]: I1124 17:29:31.703940 4760 scope.go:117] "RemoveContainer" containerID="62a259d59e77310b26333272635c8bc7757823d91b2f3b6099832eed6285f524"
Nov 24 17:29:31 crc kubenswrapper[4760]: I1124 17:29:31.767756 4760 scope.go:117] "RemoveContainer" containerID="5d09efc14073b243618ad045a3bbc0eec6d7e4d317e8a2f3249c7f0fa60498d1"
Nov 24 17:29:35 crc kubenswrapper[4760]: I1124 17:29:35.642931 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 17:29:35 crc kubenswrapper[4760]: I1124 17:29:35.643568 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 17:29:35 crc kubenswrapper[4760]: I1124 17:29:35.643625 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz"
Nov 24 17:29:35 crc kubenswrapper[4760]: I1124 17:29:35.644478 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 17:29:35 crc kubenswrapper[4760]: I1124 17:29:35.644549 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" gracePeriod=600
Nov 24 17:29:35 crc kubenswrapper[4760]: E1124 17:29:35.849505 4760 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf71fb2ac_0373_4606_a20a_0b60ca26fbc3.slice/crio-bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5.scope\": RecentStats: unable to find data in memory cache]"
Nov 24 17:29:35 crc kubenswrapper[4760]: E1124 17:29:35.861565 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3"
Nov 24 17:29:36 crc kubenswrapper[4760]: I1124 17:29:36.060638 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" exitCode=0
Nov 24 17:29:36 crc kubenswrapper[4760]: I1124 17:29:36.060731 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"}
Nov 24 17:29:36 crc kubenswrapper[4760]: I1124 17:29:36.061037 4760 scope.go:117] "RemoveContainer" containerID="18e435d6f99820c29c6f9c48bc4be513e915a9521aaac2361f225513240d4796"
Nov 24 17:29:36 crc kubenswrapper[4760]: I1124 17:29:36.061953 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"
Nov 24 17:29:36 crc kubenswrapper[4760]: E1124 17:29:36.062431 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3"
Nov 24 17:29:39 crc kubenswrapper[4760]: I1124 17:29:39.037808 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-kpvf5"]
Nov 24 17:29:39 crc kubenswrapper[4760]: I1124 17:29:39.046550 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-kpvf5"]
Nov 24 17:29:39 crc kubenswrapper[4760]: I1124 17:29:39.488717 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e036f00c-517f-4156-9e2f-52ce275d44f6" path="/var/lib/kubelet/pods/e036f00c-517f-4156-9e2f-52ce275d44f6/volumes"
Nov 24 17:29:49 crc kubenswrapper[4760]: I1124 17:29:49.466928 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"
Nov 24 17:29:49 crc kubenswrapper[4760]: E1124 17:29:49.468639 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.319309 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-khlk8"]
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.321881 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.334024 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-khlk8"]
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.428029 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-catalog-content\") pod \"certified-operators-khlk8\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") " pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.428253 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-utilities\") pod \"certified-operators-khlk8\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") " pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.428273 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz4dj\" (UniqueName: \"kubernetes.io/projected/54c93e96-2633-4bcb-bd5f-340d7deb7b65-kube-api-access-qz4dj\") pod \"certified-operators-khlk8\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") " pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.529980 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-utilities\") pod \"certified-operators-khlk8\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") " pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.530046 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz4dj\" (UniqueName: \"kubernetes.io/projected/54c93e96-2633-4bcb-bd5f-340d7deb7b65-kube-api-access-qz4dj\") pod \"certified-operators-khlk8\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") " pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.530070 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-catalog-content\") pod \"certified-operators-khlk8\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") " pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.530576 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-utilities\") pod \"certified-operators-khlk8\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") " pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.530804 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-catalog-content\") pod \"certified-operators-khlk8\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") " pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.549647 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz4dj\" (UniqueName: \"kubernetes.io/projected/54c93e96-2633-4bcb-bd5f-340d7deb7b65-kube-api-access-qz4dj\") pod \"certified-operators-khlk8\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") " pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.644454 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:29:51 crc kubenswrapper[4760]: I1124 17:29:51.964648 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-khlk8"]
Nov 24 17:29:52 crc kubenswrapper[4760]: I1124 17:29:52.234058 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khlk8" event={"ID":"54c93e96-2633-4bcb-bd5f-340d7deb7b65","Type":"ContainerStarted","Data":"14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4"}
Nov 24 17:29:52 crc kubenswrapper[4760]: I1124 17:29:52.234325 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khlk8" event={"ID":"54c93e96-2633-4bcb-bd5f-340d7deb7b65","Type":"ContainerStarted","Data":"5c15745d8ba6928a62429da1b6abc982fd174a07615bc30f6799cf61af42983e"}
Nov 24 17:29:52 crc kubenswrapper[4760]: I1124 17:29:52.235830 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 24 17:29:53 crc kubenswrapper[4760]: I1124 17:29:53.244110 4760 generic.go:334] "Generic (PLEG): container finished" podID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerID="14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4" exitCode=0
Nov 24 17:29:53 crc kubenswrapper[4760]: I1124 17:29:53.244204 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khlk8" event={"ID":"54c93e96-2633-4bcb-bd5f-340d7deb7b65","Type":"ContainerDied","Data":"14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4"}
Nov 24 17:29:53 crc kubenswrapper[4760]: I1124 17:29:53.244406 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khlk8" event={"ID":"54c93e96-2633-4bcb-bd5f-340d7deb7b65","Type":"ContainerStarted","Data":"9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e"}
Nov 24 17:29:54 crc kubenswrapper[4760]: I1124 17:29:54.256044 4760 generic.go:334] "Generic (PLEG): container finished" podID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerID="9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e" exitCode=0
Nov 24 17:29:54 crc kubenswrapper[4760]: I1124 17:29:54.256172 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khlk8" event={"ID":"54c93e96-2633-4bcb-bd5f-340d7deb7b65","Type":"ContainerDied","Data":"9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e"}
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.041722 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-tfxz8"]
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.051430 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-ce5c-account-create-6bcw4"]
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.061690 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-8cjjt"]
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.068629 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-ce5c-account-create-6bcw4"]
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.076320 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-tfxz8"]
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.083614 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-8s242"]
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.090870 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-8s242"]
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.098948 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-8cjjt"]
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.268173 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khlk8" event={"ID":"54c93e96-2633-4bcb-bd5f-340d7deb7b65","Type":"ContainerStarted","Data":"f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474"}
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.288929 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-khlk8" podStartSLOduration=1.7266093040000001 podStartE2EDuration="4.288914138s" podCreationTimestamp="2025-11-24 17:29:51 +0000 UTC" firstStartedPulling="2025-11-24 17:29:52.235575327 +0000 UTC m=+1587.558456887" lastFinishedPulling="2025-11-24 17:29:54.797880181 +0000 UTC m=+1590.120761721" observedRunningTime="2025-11-24 17:29:55.285561902 +0000 UTC m=+1590.608443452" watchObservedRunningTime="2025-11-24 17:29:55.288914138 +0000 UTC m=+1590.611795688"
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.477532 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33401460-35b1-40fd-8bf4-3b0e3d7cba89" path="/var/lib/kubelet/pods/33401460-35b1-40fd-8bf4-3b0e3d7cba89/volumes"
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.478343 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="695177f8-fcf1-4cdd-8d7a-b6ae266fe224" path="/var/lib/kubelet/pods/695177f8-fcf1-4cdd-8d7a-b6ae266fe224/volumes"
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.478884 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e76c2370-1f1c-4336-a95b-c5d11492ebe6" path="/var/lib/kubelet/pods/e76c2370-1f1c-4336-a95b-c5d11492ebe6/volumes"
Nov 24 17:29:55 crc kubenswrapper[4760]: I1124 17:29:55.479434 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f07cffba-dc3b-495a-a378-135d2b830d7a" path="/var/lib/kubelet/pods/f07cffba-dc3b-495a-a378-135d2b830d7a/volumes"
Nov 24 17:29:56 crc kubenswrapper[4760]: I1124 17:29:56.029500 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-400b-account-create-cb447"]
Nov 24 17:29:56 crc kubenswrapper[4760]: I1124 17:29:56.042495 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-b2cd-account-create-vmd78"]
Nov 24 17:29:56 crc kubenswrapper[4760]: I1124 17:29:56.050829 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-b2cd-account-create-vmd78"]
Nov 24 17:29:56 crc kubenswrapper[4760]: I1124 17:29:56.059666 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-400b-account-create-cb447"]
Nov 24 17:29:57 crc kubenswrapper[4760]: I1124 17:29:57.479753 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c57550e3-ccb2-47ac-bd84-1ed2f7eef985" path="/var/lib/kubelet/pods/c57550e3-ccb2-47ac-bd84-1ed2f7eef985/volumes"
Nov 24 17:29:57 crc kubenswrapper[4760]: I1124 17:29:57.481908 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398" path="/var/lib/kubelet/pods/ee83fcdf-cf1d-4e1a-9d95-c9b521cfc398/volumes"
Nov 24 17:29:59 crc kubenswrapper[4760]: I1124 17:29:59.033815 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-pbkv7"]
Nov 24 17:29:59 crc kubenswrapper[4760]: I1124 17:29:59.039156 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-pbkv7"]
Nov 24 17:29:59 crc kubenswrapper[4760]: I1124 17:29:59.477816 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4698227-1751-478c-8996-63502f8c74da" path="/var/lib/kubelet/pods/e4698227-1751-478c-8996-63502f8c74da/volumes"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.141618 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"]
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.142833 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.144550 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.144703 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.176268 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"]
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.205851 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-config-volume\") pod \"collect-profiles-29400090-7d4mt\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.205931 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-secret-volume\") pod \"collect-profiles-29400090-7d4mt\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.206446 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfndw\" (UniqueName: \"kubernetes.io/projected/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-kube-api-access-rfndw\") pod \"collect-profiles-29400090-7d4mt\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.349310 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfndw\" (UniqueName: \"kubernetes.io/projected/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-kube-api-access-rfndw\") pod \"collect-profiles-29400090-7d4mt\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.349490 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-config-volume\") pod \"collect-profiles-29400090-7d4mt\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.349519 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-secret-volume\") pod \"collect-profiles-29400090-7d4mt\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.350853 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-config-volume\") pod \"collect-profiles-29400090-7d4mt\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.361818 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-secret-volume\") pod \"collect-profiles-29400090-7d4mt\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.369255 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfndw\" (UniqueName: \"kubernetes.io/projected/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-kube-api-access-rfndw\") pod \"collect-profiles-29400090-7d4mt\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.473533 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:00 crc kubenswrapper[4760]: I1124 17:30:00.923933 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"]
Nov 24 17:30:01 crc kubenswrapper[4760]: I1124 17:30:01.375193 4760 generic.go:334] "Generic (PLEG): container finished" podID="1daffafe-fc2d-4f64-af17-6c6cbbe5bd81" containerID="b9e1326cc282559860139f653358c2001635fb332f717f0a50234cd0776afeea" exitCode=0
Nov 24 17:30:01 crc kubenswrapper[4760]: I1124 17:30:01.375263 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt" event={"ID":"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81","Type":"ContainerDied","Data":"b9e1326cc282559860139f653358c2001635fb332f717f0a50234cd0776afeea"}
Nov 24 17:30:01 crc kubenswrapper[4760]: I1124 17:30:01.375469 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt" event={"ID":"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81","Type":"ContainerStarted","Data":"dbc1a344cdcba693eef7d424085dbb7a2c0f33527e5d68fcdb7056a46be4b924"}
Nov 24 17:30:01 crc kubenswrapper[4760]: I1124 17:30:01.644907 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:30:01 crc kubenswrapper[4760]: I1124 17:30:01.645474 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:30:01 crc kubenswrapper[4760]: I1124 17:30:01.700752 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.436890 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.469158 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"
Nov 24 17:30:02 crc kubenswrapper[4760]: E1124 17:30:02.469481 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3"
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.505711 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-khlk8"]
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.727457 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.897460 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-config-volume\") pod \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") "
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.897771 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfndw\" (UniqueName: \"kubernetes.io/projected/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-kube-api-access-rfndw\") pod \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") "
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.897951 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-secret-volume\") pod \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\" (UID: \"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81\") "
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.898212 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-config-volume" (OuterVolumeSpecName: "config-volume") pod "1daffafe-fc2d-4f64-af17-6c6cbbe5bd81" (UID: "1daffafe-fc2d-4f64-af17-6c6cbbe5bd81"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.898554 4760 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-config-volume\") on node \"crc\" DevicePath \"\""
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.903125 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1daffafe-fc2d-4f64-af17-6c6cbbe5bd81" (UID: "1daffafe-fc2d-4f64-af17-6c6cbbe5bd81"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:30:02 crc kubenswrapper[4760]: I1124 17:30:02.903396 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-kube-api-access-rfndw" (OuterVolumeSpecName: "kube-api-access-rfndw") pod "1daffafe-fc2d-4f64-af17-6c6cbbe5bd81" (UID: "1daffafe-fc2d-4f64-af17-6c6cbbe5bd81"). InnerVolumeSpecName "kube-api-access-rfndw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:30:03 crc kubenswrapper[4760]: I1124 17:30:03.000959 4760 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 24 17:30:03 crc kubenswrapper[4760]: I1124 17:30:03.000992 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfndw\" (UniqueName: \"kubernetes.io/projected/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81-kube-api-access-rfndw\") on node \"crc\" DevicePath \"\""
Nov 24 17:30:03 crc kubenswrapper[4760]: I1124 17:30:03.397225 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt" event={"ID":"1daffafe-fc2d-4f64-af17-6c6cbbe5bd81","Type":"ContainerDied","Data":"dbc1a344cdcba693eef7d424085dbb7a2c0f33527e5d68fcdb7056a46be4b924"}
Nov 24 17:30:03 crc kubenswrapper[4760]: I1124 17:30:03.397595 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbc1a344cdcba693eef7d424085dbb7a2c0f33527e5d68fcdb7056a46be4b924"
Nov 24 17:30:03 crc kubenswrapper[4760]: I1124 17:30:03.397440 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"
Nov 24 17:30:04 crc kubenswrapper[4760]: I1124 17:30:04.408026 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-khlk8" podUID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerName="registry-server" containerID="cri-o://f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474" gracePeriod=2
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.405350 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.424229 4760 generic.go:334] "Generic (PLEG): container finished" podID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerID="f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474" exitCode=0
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.424270 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khlk8" event={"ID":"54c93e96-2633-4bcb-bd5f-340d7deb7b65","Type":"ContainerDied","Data":"f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474"}
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.424301 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khlk8" event={"ID":"54c93e96-2633-4bcb-bd5f-340d7deb7b65","Type":"ContainerDied","Data":"5c15745d8ba6928a62429da1b6abc982fd174a07615bc30f6799cf61af42983e"}
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.424321 4760 scope.go:117] "RemoveContainer" containerID="f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.424326 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-khlk8"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.461810 4760 scope.go:117] "RemoveContainer" containerID="9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.484665 4760 scope.go:117] "RemoveContainer" containerID="14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.526468 4760 scope.go:117] "RemoveContainer" containerID="f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474"
Nov 24 17:30:05 crc kubenswrapper[4760]: E1124 17:30:05.526836 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474\": container with ID starting with f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474 not found: ID does not exist" containerID="f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.526885 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474"} err="failed to get container status \"f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474\": rpc error: code = NotFound desc = could not find container \"f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474\": container with ID starting with f845908a18a8a3222f18877dd2f3ca829b1e721edc0dc68a4ebc74df74272474 not found: ID does not exist"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.526909 4760 scope.go:117] "RemoveContainer" containerID="9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e"
Nov 24 17:30:05 crc kubenswrapper[4760]: E1124 17:30:05.527191 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e\": container with ID starting with 9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e not found: ID does not exist" containerID="9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.527214 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e"} err="failed to get container status \"9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e\": rpc error: code = NotFound desc = could not find container \"9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e\": container with ID starting with 9a4274f8a6dd3af9c1fd9864a577292f2409b07fa4bd7ce7efbee56bbb15f28e not found: ID does not exist"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.527227 4760 scope.go:117] "RemoveContainer" containerID="14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4"
Nov 24 17:30:05 crc kubenswrapper[4760]: E1124 17:30:05.527398 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4\": container with ID starting with 14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4 not found: ID does not exist" containerID="14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.527417 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4"} err="failed to get container status \"14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4\": rpc error: code = NotFound desc = could not find container \"14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4\": container with ID starting with 14d0477a1b4d206707134a3a9c309f7b18446b9c98548ce3b2a07db759d2f8d4 not found: ID does not exist"
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.549130 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-utilities\") pod \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") "
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.549271 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-catalog-content\") pod \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") "
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.549453 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz4dj\" (UniqueName: \"kubernetes.io/projected/54c93e96-2633-4bcb-bd5f-340d7deb7b65-kube-api-access-qz4dj\") pod \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\" (UID: \"54c93e96-2633-4bcb-bd5f-340d7deb7b65\") "
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.550274 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-utilities" (OuterVolumeSpecName: "utilities") pod "54c93e96-2633-4bcb-bd5f-340d7deb7b65" (UID: "54c93e96-2633-4bcb-bd5f-340d7deb7b65"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.557251 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54c93e96-2633-4bcb-bd5f-340d7deb7b65-kube-api-access-qz4dj" (OuterVolumeSpecName: "kube-api-access-qz4dj") pod "54c93e96-2633-4bcb-bd5f-340d7deb7b65" (UID: "54c93e96-2633-4bcb-bd5f-340d7deb7b65"). InnerVolumeSpecName "kube-api-access-qz4dj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.595490 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "54c93e96-2633-4bcb-bd5f-340d7deb7b65" (UID: "54c93e96-2633-4bcb-bd5f-340d7deb7b65"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.651694 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.652116 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54c93e96-2633-4bcb-bd5f-340d7deb7b65-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.652133 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz4dj\" (UniqueName: \"kubernetes.io/projected/54c93e96-2633-4bcb-bd5f-340d7deb7b65-kube-api-access-qz4dj\") on node \"crc\" DevicePath \"\""
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.762397 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-khlk8"]
Nov 24 17:30:05 crc kubenswrapper[4760]: I1124 17:30:05.770770 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-khlk8"]
Nov 24 17:30:07 crc kubenswrapper[4760]: I1124 17:30:07.476475 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" path="/var/lib/kubelet/pods/54c93e96-2633-4bcb-bd5f-340d7deb7b65/volumes"
Nov 24 17:30:12 crc kubenswrapper[4760]: I1124 17:30:12.566116 4760 generic.go:334] "Generic (PLEG): container finished" podID="8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e" containerID="8d265dfcfc9665debc1a70bc4327471936ea92446e2f05c36599dbc3cdd115a7" exitCode=0
Nov 24 17:30:12 crc kubenswrapper[4760]: I1124 17:30:12.566236 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr" event={"ID":"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e","Type":"ContainerDied","Data":"8d265dfcfc9665debc1a70bc4327471936ea92446e2f05c36599dbc3cdd115a7"}
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.098768 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.216821 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-inventory\") pod \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") "
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.216968 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-ssh-key\") pod \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") "
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.217564 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7k8c\" (UniqueName: \"kubernetes.io/projected/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-kube-api-access-m7k8c\") pod \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\" (UID: \"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e\") "
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.227248 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-kube-api-access-m7k8c" (OuterVolumeSpecName: "kube-api-access-m7k8c") pod "8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e" (UID: "8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e"). InnerVolumeSpecName "kube-api-access-m7k8c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.245307 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e" (UID: "8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.246790 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-inventory" (OuterVolumeSpecName: "inventory") pod "8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e" (UID: "8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.320311 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.320339 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.320348 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7k8c\" (UniqueName: \"kubernetes.io/projected/8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e-kube-api-access-m7k8c\") on node \"crc\" DevicePath \"\""
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.586563 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr" event={"ID":"8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e","Type":"ContainerDied","Data":"5ced9760b0e87ab39d7e7fd634932039ac59c044af91343c1168d2bda8f7de77"}
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.586603 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.586607 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ced9760b0e87ab39d7e7fd634932039ac59c044af91343c1168d2bda8f7de77"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.672948 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"]
Nov 24 17:30:14 crc kubenswrapper[4760]: E1124 17:30:14.673779 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.673806 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 24 17:30:14 crc kubenswrapper[4760]: E1124 17:30:14.673831 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerName="extract-content"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.673840 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerName="extract-content"
Nov 24 17:30:14 crc kubenswrapper[4760]: E1124 17:30:14.673859 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1daffafe-fc2d-4f64-af17-6c6cbbe5bd81" containerName="collect-profiles"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.673866 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1daffafe-fc2d-4f64-af17-6c6cbbe5bd81" containerName="collect-profiles"
Nov 24 17:30:14 crc kubenswrapper[4760]: E1124 17:30:14.673876 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerName="registry-server"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.673884 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerName="registry-server"
Nov 24 17:30:14 crc kubenswrapper[4760]: E1124 17:30:14.673918 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerName="extract-utilities"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.673927 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerName="extract-utilities"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.674354 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.674392 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1daffafe-fc2d-4f64-af17-6c6cbbe5bd81" containerName="collect-profiles"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.674408 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c93e96-2633-4bcb-bd5f-340d7deb7b65" containerName="registry-server"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.675226 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.680151 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.680625 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.680888 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.681061 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.683916 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"]
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.728302 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.728387 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc9ws\" (UniqueName: \"kubernetes.io/projected/60a5bb95-2a7f-43be-a54f-be0872e8331b-kube-api-access-hc9ws\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.728481 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.829662 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.829791 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.829860 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc9ws\" (UniqueName: \"kubernetes.io/projected/60a5bb95-2a7f-43be-a54f-be0872e8331b-kube-api-access-hc9ws\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.833324 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.834358 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.852411 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc9ws\" (UniqueName: \"kubernetes.io/projected/60a5bb95-2a7f-43be-a54f-be0872e8331b-kube-api-access-hc9ws\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:14 crc kubenswrapper[4760]: I1124 17:30:14.997784 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"
Nov 24 17:30:15 crc kubenswrapper[4760]: I1124 17:30:15.473435 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"
Nov 24 17:30:15 crc kubenswrapper[4760]: E1124 17:30:15.473974 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3"
Nov 24 17:30:15 crc kubenswrapper[4760]: I1124 17:30:15.601410 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw"]
Nov 24 17:30:16 crc kubenswrapper[4760]: I1124 17:30:16.606970 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw" event={"ID":"60a5bb95-2a7f-43be-a54f-be0872e8331b","Type":"ContainerStarted","Data":"5f6c28bec53be1b5bb67f69542207c4a3218e2d079d1976862e2ab7ea4d1ecbc"}
Nov 24 17:30:16 crc kubenswrapper[4760]: I1124 17:30:16.607528 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw" event={"ID":"60a5bb95-2a7f-43be-a54f-be0872e8331b","Type":"ContainerStarted","Data":"bd10afa7c14357c7e8fad286459f99cca77fce5815816e2d33d81c0536b69f8c"}
Nov 24 17:30:16 crc kubenswrapper[4760]: I1124 17:30:16.637582 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw" podStartSLOduration=2.149186614 podStartE2EDuration="2.637561714s" podCreationTimestamp="2025-11-24 17:30:14 +0000 UTC" firstStartedPulling="2025-11-24 17:30:15.610016018 +0000 UTC m=+1610.932897568" lastFinishedPulling="2025-11-24 17:30:16.098391108 +0000 UTC m=+1611.421272668" observedRunningTime="2025-11-24 17:30:16.629196554 +0000 UTC m=+1611.952078114" watchObservedRunningTime="2025-11-24 17:30:16.637561714 +0000 UTC m=+1611.960443274"
Nov 24 17:30:28 crc kubenswrapper[4760]: I1124 17:30:28.042645 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-8x9fd"]
Nov 24 17:30:28 crc kubenswrapper[4760]: I1124 17:30:28.043935 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-8x9fd"]
Nov 24 17:30:29 crc kubenswrapper[4760]: I1124 17:30:29.466705 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"
Nov 24 17:30:29 crc kubenswrapper[4760]: E1124 17:30:29.467993 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3"
Nov 24 17:30:29 crc kubenswrapper[4760]: I1124 17:30:29.479521 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1080c24b-f48e-4150-b4cb-c0b1bf1081e3" path="/var/lib/kubelet/pods/1080c24b-f48e-4150-b4cb-c0b1bf1081e3/volumes"
Nov 24 17:30:32 crc kubenswrapper[4760]: I1124 17:30:32.041665 4760 scope.go:117] "RemoveContainer" containerID="5e3d83940ec044a41c5fac47c8418bf678378ea338c0cbc4bdafe5e2ff6cde0b" Nov 24 17:30:32 crc kubenswrapper[4760]: I1124 17:30:32.071460 4760 scope.go:117] "RemoveContainer" containerID="b449ad24ce613c6512a596bef84f34cff188e37587d6cbeacd0d4cf54b19f22f" Nov 24 17:30:32 crc kubenswrapper[4760]: I1124 17:30:32.115161 4760 scope.go:117] "RemoveContainer" containerID="7fedc2bc56f4571d58f77b6b748288010857142ba3b4d8aeb33ac14eb8fd03f1" Nov 24 17:30:32 crc kubenswrapper[4760]: I1124 17:30:32.164585 4760 scope.go:117] "RemoveContainer" containerID="77b1a9c3f941b58bd531ae2d0729aad1e639c502dfae3b6abb37ab3aaf918921" Nov 24 17:30:32 crc kubenswrapper[4760]: I1124 17:30:32.225563 4760 scope.go:117] "RemoveContainer" containerID="63254f79d27ee8428f886351c1a7412eb58275ec63e328ee4e1741fd902b47d6" Nov 24 17:30:32 crc kubenswrapper[4760]: I1124 17:30:32.246507 4760 scope.go:117] "RemoveContainer" containerID="5be91801ae910f735dab71d5f928752d3833df220a53308ed1455434c9e44794" Nov 24 17:30:32 crc kubenswrapper[4760]: I1124 17:30:32.308421 4760 scope.go:117] "RemoveContainer" containerID="7c52f1e0579377f97f88ca8d66897c10c040d3d51fdca3dc7abf1d75624b8697" Nov 24 17:30:32 crc kubenswrapper[4760]: I1124 17:30:32.333797 4760 scope.go:117] "RemoveContainer" containerID="81ac5b770eaff3bd3b3ef49f81e9f0abea588fcd645008cb799320f83bd47a69" Nov 24 17:30:32 crc kubenswrapper[4760]: I1124 17:30:32.363561 4760 scope.go:117] "RemoveContainer" containerID="9e21020274d85f15b71e7507ea3468d947ea60485bb11a30dd53aeae27d2f58a" Nov 24 17:30:39 crc kubenswrapper[4760]: I1124 17:30:39.036854 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-bkqxk"] Nov 24 17:30:39 crc kubenswrapper[4760]: I1124 17:30:39.048074 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-bkqxk"] Nov 24 17:30:39 crc kubenswrapper[4760]: I1124 17:30:39.061051 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-z6w2f"] Nov 24 17:30:39 crc kubenswrapper[4760]: I1124 17:30:39.076345 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-qv7gv"] Nov 24 17:30:39 crc kubenswrapper[4760]: I1124 17:30:39.083794 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-z6w2f"] Nov 24 17:30:39 crc kubenswrapper[4760]: I1124 17:30:39.090703 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-qv7gv"] Nov 24 17:30:39 crc kubenswrapper[4760]: I1124 17:30:39.477368 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="195eb4e3-2851-4742-ba6a-48f56b7ac231" path="/var/lib/kubelet/pods/195eb4e3-2851-4742-ba6a-48f56b7ac231/volumes" Nov 24 17:30:39 crc kubenswrapper[4760]: I1124 17:30:39.478329 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a" path="/var/lib/kubelet/pods/3eea1be1-6bb4-44b1-8f08-b4fc58bafc4a/volumes" Nov 24 17:30:39 crc kubenswrapper[4760]: I1124 17:30:39.479026 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d21ffd86-bec8-47a2-940e-fc1fcf5d32c7" path="/var/lib/kubelet/pods/d21ffd86-bec8-47a2-940e-fc1fcf5d32c7/volumes" Nov 24 17:30:41 crc kubenswrapper[4760]: I1124 17:30:41.466199 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:30:41 crc kubenswrapper[4760]: E1124 
17:30:41.466812 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:30:49 crc kubenswrapper[4760]: I1124 17:30:49.040727 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-gszfs"] Nov 24 17:30:49 crc kubenswrapper[4760]: I1124 17:30:49.047261 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-gszfs"] Nov 24 17:30:49 crc kubenswrapper[4760]: I1124 17:30:49.482307 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="182b9849-0723-4fa8-bade-df2f05e6cf37" path="/var/lib/kubelet/pods/182b9849-0723-4fa8-bade-df2f05e6cf37/volumes" Nov 24 17:30:54 crc kubenswrapper[4760]: I1124 17:30:54.466945 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:30:54 crc kubenswrapper[4760]: E1124 17:30:54.467802 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:31:07 crc kubenswrapper[4760]: I1124 17:31:07.467928 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:31:07 crc kubenswrapper[4760]: E1124 17:31:07.468892 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:31:22 crc kubenswrapper[4760]: I1124 17:31:22.466631 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:31:22 crc kubenswrapper[4760]: E1124 17:31:22.467323 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:31:27 crc kubenswrapper[4760]: I1124 17:31:27.244283 4760 generic.go:334] "Generic (PLEG): container finished" podID="60a5bb95-2a7f-43be-a54f-be0872e8331b" containerID="5f6c28bec53be1b5bb67f69542207c4a3218e2d079d1976862e2ab7ea4d1ecbc" exitCode=0 Nov 24 17:31:27 crc kubenswrapper[4760]: I1124 17:31:27.244362 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw" 
event={"ID":"60a5bb95-2a7f-43be-a54f-be0872e8331b","Type":"ContainerDied","Data":"5f6c28bec53be1b5bb67f69542207c4a3218e2d079d1976862e2ab7ea4d1ecbc"} Nov 24 17:31:28 crc kubenswrapper[4760]: I1124 17:31:28.615168 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw" Nov 24 17:31:28 crc kubenswrapper[4760]: I1124 17:31:28.791031 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-ssh-key\") pod \"60a5bb95-2a7f-43be-a54f-be0872e8331b\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " Nov 24 17:31:28 crc kubenswrapper[4760]: I1124 17:31:28.791149 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-inventory\") pod \"60a5bb95-2a7f-43be-a54f-be0872e8331b\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " Nov 24 17:31:28 crc kubenswrapper[4760]: I1124 17:31:28.791227 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hc9ws\" (UniqueName: \"kubernetes.io/projected/60a5bb95-2a7f-43be-a54f-be0872e8331b-kube-api-access-hc9ws\") pod \"60a5bb95-2a7f-43be-a54f-be0872e8331b\" (UID: \"60a5bb95-2a7f-43be-a54f-be0872e8331b\") " Nov 24 17:31:28 crc kubenswrapper[4760]: I1124 17:31:28.796566 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60a5bb95-2a7f-43be-a54f-be0872e8331b-kube-api-access-hc9ws" (OuterVolumeSpecName: "kube-api-access-hc9ws") pod "60a5bb95-2a7f-43be-a54f-be0872e8331b" (UID: "60a5bb95-2a7f-43be-a54f-be0872e8331b"). InnerVolumeSpecName "kube-api-access-hc9ws". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:31:28 crc kubenswrapper[4760]: I1124 17:31:28.820503 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-inventory" (OuterVolumeSpecName: "inventory") pod "60a5bb95-2a7f-43be-a54f-be0872e8331b" (UID: "60a5bb95-2a7f-43be-a54f-be0872e8331b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:31:28 crc kubenswrapper[4760]: I1124 17:31:28.821100 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "60a5bb95-2a7f-43be-a54f-be0872e8331b" (UID: "60a5bb95-2a7f-43be-a54f-be0872e8331b"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:31:28 crc kubenswrapper[4760]: I1124 17:31:28.892983 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:31:28 crc kubenswrapper[4760]: I1124 17:31:28.893027 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60a5bb95-2a7f-43be-a54f-be0872e8331b-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:31:28 crc kubenswrapper[4760]: I1124 17:31:28.893037 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hc9ws\" (UniqueName: \"kubernetes.io/projected/60a5bb95-2a7f-43be-a54f-be0872e8331b-kube-api-access-hc9ws\") on node \"crc\" DevicePath \"\"" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.043465 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-sz6nt"] Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.055507 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-713e-account-create-7lqfc"] Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.067578 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-sp75g"] Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.074177 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-sz6nt"] Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.079724 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-713e-account-create-7lqfc"] Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.085128 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-sp75g"] Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.263086 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw" event={"ID":"60a5bb95-2a7f-43be-a54f-be0872e8331b","Type":"ContainerDied","Data":"bd10afa7c14357c7e8fad286459f99cca77fce5815816e2d33d81c0536b69f8c"} Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.263133 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd10afa7c14357c7e8fad286459f99cca77fce5815816e2d33d81c0536b69f8c" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.263140 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.406862 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279"] Nov 24 17:31:29 crc kubenswrapper[4760]: E1124 17:31:29.408498 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60a5bb95-2a7f-43be-a54f-be0872e8331b" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.408598 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="60a5bb95-2a7f-43be-a54f-be0872e8331b" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.409954 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="60a5bb95-2a7f-43be-a54f-be0872e8331b" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.412722 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.425256 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9fzd\" (UniqueName: \"kubernetes.io/projected/a548ab89-b523-4f50-b490-7470e05662b6-kube-api-access-g9fzd\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fx279\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.425337 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fx279\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.425392 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fx279\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.426818 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.436273 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.437125 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.437311 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.456063 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279"] Nov 24 17:31:29 crc 
kubenswrapper[4760]: I1124 17:31:29.475838 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4be89ae4-3cce-4eea-b760-0759df25aeaf" path="/var/lib/kubelet/pods/4be89ae4-3cce-4eea-b760-0759df25aeaf/volumes" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.476537 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e2f485a-8ed4-4ba6-a35b-9e5788d46e10" path="/var/lib/kubelet/pods/7e2f485a-8ed4-4ba6-a35b-9e5788d46e10/volumes" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.477115 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2" path="/var/lib/kubelet/pods/f3b3763f-9cbc-47ec-b70c-cc0fc3f289b2/volumes" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.527044 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9fzd\" (UniqueName: \"kubernetes.io/projected/a548ab89-b523-4f50-b490-7470e05662b6-kube-api-access-g9fzd\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fx279\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.527114 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fx279\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.527154 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fx279\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.531463 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fx279\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.531577 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fx279\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.543450 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9fzd\" (UniqueName: \"kubernetes.io/projected/a548ab89-b523-4f50-b490-7470e05662b6-kube-api-access-g9fzd\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-fx279\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:29 crc kubenswrapper[4760]: I1124 17:31:29.729484 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:30 crc kubenswrapper[4760]: I1124 17:31:30.029549 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-457ln"] Nov 24 17:31:30 crc kubenswrapper[4760]: I1124 17:31:30.037517 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-2801-account-create-79wz9"] Nov 24 17:31:30 crc kubenswrapper[4760]: I1124 17:31:30.046810 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-c21f-account-create-s2ss6"] Nov 24 17:31:30 crc kubenswrapper[4760]: I1124 17:31:30.053475 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-2801-account-create-79wz9"] Nov 24 17:31:30 crc kubenswrapper[4760]: I1124 17:31:30.061142 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-c21f-account-create-s2ss6"] Nov 24 17:31:30 crc kubenswrapper[4760]: I1124 17:31:30.067708 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-457ln"] Nov 24 17:31:30 crc kubenswrapper[4760]: I1124 17:31:30.231505 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279"] Nov 24 17:31:30 crc kubenswrapper[4760]: I1124 17:31:30.273532 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" event={"ID":"a548ab89-b523-4f50-b490-7470e05662b6","Type":"ContainerStarted","Data":"eb04c2592af421fb5b792dcdca590f13032fa6f29f0547ca652b42b612a91cb4"} Nov 24 17:31:31 crc kubenswrapper[4760]: I1124 17:31:31.285759 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" event={"ID":"a548ab89-b523-4f50-b490-7470e05662b6","Type":"ContainerStarted","Data":"bb004b7ef47811ca5205ca55bc588e83f717e107aa18b79dc43bffc0f781845a"} Nov 24 17:31:31 crc kubenswrapper[4760]: I1124 17:31:31.307598 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" podStartSLOduration=1.771403125 podStartE2EDuration="2.307579646s" podCreationTimestamp="2025-11-24 17:31:29 +0000 UTC" firstStartedPulling="2025-11-24 17:31:30.238596042 +0000 UTC m=+1685.561477592" lastFinishedPulling="2025-11-24 17:31:30.774772573 +0000 UTC m=+1686.097654113" observedRunningTime="2025-11-24 17:31:31.303776737 +0000 UTC m=+1686.626658287" watchObservedRunningTime="2025-11-24 17:31:31.307579646 +0000 UTC m=+1686.630461186" Nov 24 17:31:31 crc kubenswrapper[4760]: I1124 17:31:31.480766 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66dd0d71-2f7f-485a-9cd7-7b2e84a49a85" path="/var/lib/kubelet/pods/66dd0d71-2f7f-485a-9cd7-7b2e84a49a85/volumes" Nov 24 17:31:31 crc kubenswrapper[4760]: I1124 17:31:31.481543 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdab3c6d-f3c2-4eae-a180-5d7fea562148" path="/var/lib/kubelet/pods/cdab3c6d-f3c2-4eae-a180-5d7fea562148/volumes" Nov 24 17:31:31 crc kubenswrapper[4760]: I1124 17:31:31.482598 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf151a5f-3656-4c41-85ac-f1cdedf67f76" path="/var/lib/kubelet/pods/cf151a5f-3656-4c41-85ac-f1cdedf67f76/volumes" Nov 24 17:31:32 crc kubenswrapper[4760]: I1124 17:31:32.540460 4760 scope.go:117] "RemoveContainer" 
containerID="9b91d2c71656b73469b6636aa25fcb341195df85c3a801b171d580d3a9b28675" Nov 24 17:31:32 crc kubenswrapper[4760]: I1124 17:31:32.563279 4760 scope.go:117] "RemoveContainer" containerID="072fda572fe3f02f0334668bd4de8238727b58c9d912850c6bf6edf350d406d6" Nov 24 17:31:32 crc kubenswrapper[4760]: I1124 17:31:32.619738 4760 scope.go:117] "RemoveContainer" containerID="a7ee9b9f1e8fc50b7296aa9aaf461510a9b7493d7d00759dcd0df1dc177e1866" Nov 24 17:31:32 crc kubenswrapper[4760]: I1124 17:31:32.667297 4760 scope.go:117] "RemoveContainer" containerID="7b88520b6d60ec40bd97a50308d4b2e36b5cfc4b27a80677b3faf35dd7c1a1c6" Nov 24 17:31:32 crc kubenswrapper[4760]: I1124 17:31:32.729979 4760 scope.go:117] "RemoveContainer" containerID="52cf2a810a2d225ce7497def9d47942ca61f2f4a03d4bb7b42768af14d0b56b7" Nov 24 17:31:32 crc kubenswrapper[4760]: I1124 17:31:32.766121 4760 scope.go:117] "RemoveContainer" containerID="f8ca0b9ea6e99f0a254332e211aa436f353811e80b73a09287daaae7f7db7c64" Nov 24 17:31:32 crc kubenswrapper[4760]: I1124 17:31:32.798129 4760 scope.go:117] "RemoveContainer" containerID="e1ca4488952d3e9c07a18aafc349ec3e3459f1ecde5d71083e26821008e6d8b3" Nov 24 17:31:32 crc kubenswrapper[4760]: I1124 17:31:32.815997 4760 scope.go:117] "RemoveContainer" containerID="45238f4028a00df9cda45bab3360589679959357592d16bc6a0c416c828ad2bd" Nov 24 17:31:32 crc kubenswrapper[4760]: I1124 17:31:32.846333 4760 scope.go:117] "RemoveContainer" containerID="35786298e1e5ae845548c51613f1f30be1dabf1c0b860e2999ddf37d6501f6aa" Nov 24 17:31:32 crc kubenswrapper[4760]: I1124 17:31:32.880071 4760 scope.go:117] "RemoveContainer" containerID="89214f167dc8a23470347ceb4f6e72f95b0a5e8203cb957182b357b505365a7b" Nov 24 17:31:35 crc kubenswrapper[4760]: I1124 17:31:35.472972 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:31:35 crc kubenswrapper[4760]: E1124 17:31:35.473686 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:31:36 crc kubenswrapper[4760]: I1124 17:31:36.330043 4760 generic.go:334] "Generic (PLEG): container finished" podID="a548ab89-b523-4f50-b490-7470e05662b6" containerID="bb004b7ef47811ca5205ca55bc588e83f717e107aa18b79dc43bffc0f781845a" exitCode=0 Nov 24 17:31:36 crc kubenswrapper[4760]: I1124 17:31:36.330341 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" event={"ID":"a548ab89-b523-4f50-b490-7470e05662b6","Type":"ContainerDied","Data":"bb004b7ef47811ca5205ca55bc588e83f717e107aa18b79dc43bffc0f781845a"} Nov 24 17:31:37 crc kubenswrapper[4760]: I1124 17:31:37.800849 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:37 crc kubenswrapper[4760]: I1124 17:31:37.980146 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-ssh-key\") pod \"a548ab89-b523-4f50-b490-7470e05662b6\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " Nov 24 17:31:37 crc kubenswrapper[4760]: I1124 17:31:37.980241 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-inventory\") pod \"a548ab89-b523-4f50-b490-7470e05662b6\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " Nov 24 17:31:37 crc kubenswrapper[4760]: I1124 17:31:37.980405 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9fzd\" (UniqueName: \"kubernetes.io/projected/a548ab89-b523-4f50-b490-7470e05662b6-kube-api-access-g9fzd\") pod \"a548ab89-b523-4f50-b490-7470e05662b6\" (UID: \"a548ab89-b523-4f50-b490-7470e05662b6\") " Nov 24 17:31:37 crc kubenswrapper[4760]: I1124 17:31:37.988868 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a548ab89-b523-4f50-b490-7470e05662b6-kube-api-access-g9fzd" (OuterVolumeSpecName: "kube-api-access-g9fzd") pod "a548ab89-b523-4f50-b490-7470e05662b6" (UID: "a548ab89-b523-4f50-b490-7470e05662b6"). InnerVolumeSpecName "kube-api-access-g9fzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.020850 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-inventory" (OuterVolumeSpecName: "inventory") pod "a548ab89-b523-4f50-b490-7470e05662b6" (UID: "a548ab89-b523-4f50-b490-7470e05662b6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.049037 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a548ab89-b523-4f50-b490-7470e05662b6" (UID: "a548ab89-b523-4f50-b490-7470e05662b6"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.084963 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.084993 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a548ab89-b523-4f50-b490-7470e05662b6-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.085020 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9fzd\" (UniqueName: \"kubernetes.io/projected/a548ab89-b523-4f50-b490-7470e05662b6-kube-api-access-g9fzd\") on node \"crc\" DevicePath \"\"" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.355979 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" event={"ID":"a548ab89-b523-4f50-b490-7470e05662b6","Type":"ContainerDied","Data":"eb04c2592af421fb5b792dcdca590f13032fa6f29f0547ca652b42b612a91cb4"} Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.356248 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb04c2592af421fb5b792dcdca590f13032fa6f29f0547ca652b42b612a91cb4" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.356312 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-fx279" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.439749 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"] Nov 24 17:31:38 crc kubenswrapper[4760]: E1124 17:31:38.440363 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a548ab89-b523-4f50-b490-7470e05662b6" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.440398 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a548ab89-b523-4f50-b490-7470e05662b6" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.440743 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a548ab89-b523-4f50-b490-7470e05662b6" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.441854 4760 util.go:30] "No sandbox for pod can be found. 
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.445287 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.445497 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.445625 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.445834 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.457077 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"]
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.601713 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28rj7\" (UniqueName: \"kubernetes.io/projected/a58b5d43-9b4d-4061-96e1-e02c61a4630c-kube-api-access-28rj7\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sxvjh\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.601798 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sxvjh\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.602146 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sxvjh\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.704159 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sxvjh\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.704360 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28rj7\" (UniqueName: \"kubernetes.io/projected/a58b5d43-9b4d-4061-96e1-e02c61a4630c-kube-api-access-28rj7\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sxvjh\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.704484 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sxvjh\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.709841 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sxvjh\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.709986 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sxvjh\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.723358 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28rj7\" (UniqueName: \"kubernetes.io/projected/a58b5d43-9b4d-4061-96e1-e02c61a4630c-kube-api-access-28rj7\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sxvjh\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:31:38 crc kubenswrapper[4760]: I1124 17:31:38.770406 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:31:39 crc kubenswrapper[4760]: I1124 17:31:39.297114 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"]
Nov 24 17:31:39 crc kubenswrapper[4760]: I1124 17:31:39.364941 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh" event={"ID":"a58b5d43-9b4d-4061-96e1-e02c61a4630c","Type":"ContainerStarted","Data":"3ade5000327347386476eef4cf87983ed9ed1b57f24adabb9663047f2e984aa6"}
Nov 24 17:31:40 crc kubenswrapper[4760]: I1124 17:31:40.379380 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh" event={"ID":"a58b5d43-9b4d-4061-96e1-e02c61a4630c","Type":"ContainerStarted","Data":"05f2dc5330674bbab0a809ea780b47aeeb1737f0771cd1533f7495b7dc9c187e"}
Nov 24 17:31:47 crc kubenswrapper[4760]: I1124 17:31:47.468135 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"
Nov 24 17:31:47 crc kubenswrapper[4760]: E1124 17:31:47.468935 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3"
Nov 24 17:31:56 crc kubenswrapper[4760]: I1124 17:31:56.059570 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh" podStartSLOduration=17.649080998 podStartE2EDuration="18.059549345s" podCreationTimestamp="2025-11-24 17:31:38 +0000 UTC" firstStartedPulling="2025-11-24 17:31:39.304989899 +0000 UTC m=+1694.627871449" lastFinishedPulling="2025-11-24 17:31:39.715458236 +0000 UTC m=+1695.038339796" observedRunningTime="2025-11-24 17:31:40.409125452 +0000 UTC m=+1695.732007002" watchObservedRunningTime="2025-11-24 17:31:56.059549345 +0000 UTC m=+1711.382430905"
Nov 24 17:31:56 crc kubenswrapper[4760]: I1124 17:31:56.061733 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jxp48"]
Nov 24 17:31:56 crc kubenswrapper[4760]: I1124 17:31:56.076701 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jxp48"]
Nov 24 17:31:57 crc kubenswrapper[4760]: I1124 17:31:57.477551 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79b8dc02-4f92-415c-be5c-0822ff170919" path="/var/lib/kubelet/pods/79b8dc02-4f92-415c-be5c-0822ff170919/volumes"
Nov 24 17:32:02 crc kubenswrapper[4760]: I1124 17:32:02.466894 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"
Nov 24 17:32:02 crc kubenswrapper[4760]: E1124 17:32:02.467686 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3"
Nov 24 17:32:14 crc kubenswrapper[4760]: I1124 17:32:14.467376 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"
Nov 24 17:32:14 crc kubenswrapper[4760]: E1124 17:32:14.468339 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3"
Nov 24 17:32:15 crc kubenswrapper[4760]: I1124 17:32:15.055389 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4xzmb"]
Nov 24 17:32:15 crc kubenswrapper[4760]: I1124 17:32:15.085081 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-982zc"]
Nov 24 17:32:15 crc kubenswrapper[4760]: I1124 17:32:15.101834 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4xzmb"]
Nov 24 17:32:15 crc kubenswrapper[4760]: I1124 17:32:15.112435 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-982zc"]
Nov 24 17:32:15 crc kubenswrapper[4760]: I1124 17:32:15.479579 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ceea6f9-40cc-4203-8d24-79bb3b19eebe" path="/var/lib/kubelet/pods/4ceea6f9-40cc-4203-8d24-79bb3b19eebe/volumes"
Nov 24 17:32:15 crc kubenswrapper[4760]: I1124 17:32:15.480891 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5870c82c-79ce-46d0-861b-bfebbab194f9" path="/var/lib/kubelet/pods/5870c82c-79ce-46d0-861b-bfebbab194f9/volumes"
Nov 24 17:32:16 crc kubenswrapper[4760]: I1124 17:32:16.700075 4760 generic.go:334] "Generic (PLEG): container finished" podID="a58b5d43-9b4d-4061-96e1-e02c61a4630c" containerID="05f2dc5330674bbab0a809ea780b47aeeb1737f0771cd1533f7495b7dc9c187e" exitCode=0
Nov 24 17:32:16 crc kubenswrapper[4760]: I1124 17:32:16.700156 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh" event={"ID":"a58b5d43-9b4d-4061-96e1-e02c61a4630c","Type":"ContainerDied","Data":"05f2dc5330674bbab0a809ea780b47aeeb1737f0771cd1533f7495b7dc9c187e"}
Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.123816 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh"
Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.197418 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28rj7\" (UniqueName: \"kubernetes.io/projected/a58b5d43-9b4d-4061-96e1-e02c61a4630c-kube-api-access-28rj7\") pod \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") "
Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.197497 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-inventory\") pod \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") "
Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.197626 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-ssh-key\") pod \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\" (UID: \"a58b5d43-9b4d-4061-96e1-e02c61a4630c\") "
Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.203342 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a58b5d43-9b4d-4061-96e1-e02c61a4630c-kube-api-access-28rj7" (OuterVolumeSpecName: "kube-api-access-28rj7") pod "a58b5d43-9b4d-4061-96e1-e02c61a4630c" (UID: "a58b5d43-9b4d-4061-96e1-e02c61a4630c"). InnerVolumeSpecName "kube-api-access-28rj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.223168 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-inventory" (OuterVolumeSpecName: "inventory") pod "a58b5d43-9b4d-4061-96e1-e02c61a4630c" (UID: "a58b5d43-9b4d-4061-96e1-e02c61a4630c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.224678 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a58b5d43-9b4d-4061-96e1-e02c61a4630c" (UID: "a58b5d43-9b4d-4061-96e1-e02c61a4630c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.299558 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.299585 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28rj7\" (UniqueName: \"kubernetes.io/projected/a58b5d43-9b4d-4061-96e1-e02c61a4630c-kube-api-access-28rj7\") on node \"crc\" DevicePath \"\"" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.299599 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a58b5d43-9b4d-4061-96e1-e02c61a4630c-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.718598 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh" event={"ID":"a58b5d43-9b4d-4061-96e1-e02c61a4630c","Type":"ContainerDied","Data":"3ade5000327347386476eef4cf87983ed9ed1b57f24adabb9663047f2e984aa6"} Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.718642 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ade5000327347386476eef4cf87983ed9ed1b57f24adabb9663047f2e984aa6" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.718668 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sxvjh" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.797922 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb"] Nov 24 17:32:18 crc kubenswrapper[4760]: E1124 17:32:18.798486 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a58b5d43-9b4d-4061-96e1-e02c61a4630c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.798512 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a58b5d43-9b4d-4061-96e1-e02c61a4630c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.798724 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a58b5d43-9b4d-4061-96e1-e02c61a4630c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.799497 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.802474 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.802479 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.803063 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.803422 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.811558 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb"] Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.909933 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b787l\" (UniqueName: \"kubernetes.io/projected/37d99b2c-138d-4470-9807-eec5191203a6-kube-api-access-b787l\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.910087 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:18 crc kubenswrapper[4760]: I1124 17:32:18.910128 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:19 crc kubenswrapper[4760]: I1124 17:32:19.012046 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b787l\" (UniqueName: \"kubernetes.io/projected/37d99b2c-138d-4470-9807-eec5191203a6-kube-api-access-b787l\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:19 crc kubenswrapper[4760]: I1124 17:32:19.012477 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:19 crc kubenswrapper[4760]: I1124 17:32:19.012525 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb\" 
(UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:19 crc kubenswrapper[4760]: I1124 17:32:19.034063 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:19 crc kubenswrapper[4760]: I1124 17:32:19.034827 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:19 crc kubenswrapper[4760]: I1124 17:32:19.037992 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b787l\" (UniqueName: \"kubernetes.io/projected/37d99b2c-138d-4470-9807-eec5191203a6-kube-api-access-b787l\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:19 crc kubenswrapper[4760]: I1124 17:32:19.170931 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:32:19 crc kubenswrapper[4760]: I1124 17:32:19.731231 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb"] Nov 24 17:32:20 crc kubenswrapper[4760]: I1124 17:32:20.736543 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" event={"ID":"37d99b2c-138d-4470-9807-eec5191203a6","Type":"ContainerStarted","Data":"af654997a9a50a773abb1f90c2d16fc374ba0922ed0ca4499fd3706ed787f5ea"} Nov 24 17:32:20 crc kubenswrapper[4760]: I1124 17:32:20.736812 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" event={"ID":"37d99b2c-138d-4470-9807-eec5191203a6","Type":"ContainerStarted","Data":"0704d50cc7e48aecfc17a4d9cb71fa32d636a495e0a62d00fe3b67f953505bf5"} Nov 24 17:32:20 crc kubenswrapper[4760]: I1124 17:32:20.758423 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" podStartSLOduration=2.332583252 podStartE2EDuration="2.75840811s" podCreationTimestamp="2025-11-24 17:32:18 +0000 UTC" firstStartedPulling="2025-11-24 17:32:19.731066188 +0000 UTC m=+1735.053947738" lastFinishedPulling="2025-11-24 17:32:20.156891026 +0000 UTC m=+1735.479772596" observedRunningTime="2025-11-24 17:32:20.758107751 +0000 UTC m=+1736.080989311" watchObservedRunningTime="2025-11-24 17:32:20.75840811 +0000 UTC m=+1736.081289660" Nov 24 17:32:25 crc kubenswrapper[4760]: I1124 17:32:25.473105 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:32:25 crc kubenswrapper[4760]: E1124 17:32:25.476679 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:32:33 crc kubenswrapper[4760]: I1124 17:32:33.085490 4760 scope.go:117] "RemoveContainer" containerID="8c52a38789ce8fe16540733510b8a381c264f8d6854afb30dc68241931266d06" Nov 24 17:32:33 crc kubenswrapper[4760]: I1124 17:32:33.119481 4760 scope.go:117] "RemoveContainer" containerID="5367d58febd30efee2c861a698ae89894372b4386b462351523fa207707232f4" Nov 24 17:32:33 crc kubenswrapper[4760]: I1124 17:32:33.185938 4760 scope.go:117] "RemoveContainer" containerID="8051cdac390a97e6818990f57d131f6ed1953606210d5921be2a42b25a2a34c2" Nov 24 17:32:37 crc kubenswrapper[4760]: I1124 17:32:37.466472 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:32:37 crc kubenswrapper[4760]: E1124 17:32:37.467107 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:32:51 crc kubenswrapper[4760]: I1124 17:32:51.467028 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:32:51 crc kubenswrapper[4760]: E1124 17:32:51.468599 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:32:59 crc kubenswrapper[4760]: I1124 17:32:59.037447 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-k26pp"] Nov 24 17:32:59 crc kubenswrapper[4760]: I1124 17:32:59.044991 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-k26pp"] Nov 24 17:32:59 crc kubenswrapper[4760]: I1124 17:32:59.477875 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="332e7b13-fb92-4803-a09a-1d3368fa74a0" path="/var/lib/kubelet/pods/332e7b13-fb92-4803-a09a-1d3368fa74a0/volumes" Nov 24 17:33:05 crc kubenswrapper[4760]: I1124 17:33:05.472184 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:33:05 crc kubenswrapper[4760]: E1124 17:33:05.472996 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:33:08 crc kubenswrapper[4760]: I1124 17:33:08.148237 4760 generic.go:334] "Generic (PLEG): container finished" podID="37d99b2c-138d-4470-9807-eec5191203a6" 
containerID="af654997a9a50a773abb1f90c2d16fc374ba0922ed0ca4499fd3706ed787f5ea" exitCode=0 Nov 24 17:33:08 crc kubenswrapper[4760]: I1124 17:33:08.148344 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" event={"ID":"37d99b2c-138d-4470-9807-eec5191203a6","Type":"ContainerDied","Data":"af654997a9a50a773abb1f90c2d16fc374ba0922ed0ca4499fd3706ed787f5ea"} Nov 24 17:33:09 crc kubenswrapper[4760]: I1124 17:33:09.580192 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:33:09 crc kubenswrapper[4760]: I1124 17:33:09.648492 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-inventory\") pod \"37d99b2c-138d-4470-9807-eec5191203a6\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " Nov 24 17:33:09 crc kubenswrapper[4760]: I1124 17:33:09.648583 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b787l\" (UniqueName: \"kubernetes.io/projected/37d99b2c-138d-4470-9807-eec5191203a6-kube-api-access-b787l\") pod \"37d99b2c-138d-4470-9807-eec5191203a6\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " Nov 24 17:33:09 crc kubenswrapper[4760]: I1124 17:33:09.648627 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-ssh-key\") pod \"37d99b2c-138d-4470-9807-eec5191203a6\" (UID: \"37d99b2c-138d-4470-9807-eec5191203a6\") " Nov 24 17:33:09 crc kubenswrapper[4760]: I1124 17:33:09.655332 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37d99b2c-138d-4470-9807-eec5191203a6-kube-api-access-b787l" (OuterVolumeSpecName: "kube-api-access-b787l") pod "37d99b2c-138d-4470-9807-eec5191203a6" (UID: "37d99b2c-138d-4470-9807-eec5191203a6"). InnerVolumeSpecName "kube-api-access-b787l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:33:09 crc kubenswrapper[4760]: I1124 17:33:09.676544 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "37d99b2c-138d-4470-9807-eec5191203a6" (UID: "37d99b2c-138d-4470-9807-eec5191203a6"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:33:09 crc kubenswrapper[4760]: I1124 17:33:09.676878 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-inventory" (OuterVolumeSpecName: "inventory") pod "37d99b2c-138d-4470-9807-eec5191203a6" (UID: "37d99b2c-138d-4470-9807-eec5191203a6"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:33:09 crc kubenswrapper[4760]: I1124 17:33:09.751179 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b787l\" (UniqueName: \"kubernetes.io/projected/37d99b2c-138d-4470-9807-eec5191203a6-kube-api-access-b787l\") on node \"crc\" DevicePath \"\"" Nov 24 17:33:09 crc kubenswrapper[4760]: I1124 17:33:09.751211 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:33:09 crc kubenswrapper[4760]: I1124 17:33:09.751222 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/37d99b2c-138d-4470-9807-eec5191203a6-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.170985 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" event={"ID":"37d99b2c-138d-4470-9807-eec5191203a6","Type":"ContainerDied","Data":"0704d50cc7e48aecfc17a4d9cb71fa32d636a495e0a62d00fe3b67f953505bf5"} Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.171400 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0704d50cc7e48aecfc17a4d9cb71fa32d636a495e0a62d00fe3b67f953505bf5" Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.171186 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb" Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.265322 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-2jq4r"] Nov 24 17:33:10 crc kubenswrapper[4760]: E1124 17:33:10.266498 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37d99b2c-138d-4470-9807-eec5191203a6" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.266700 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="37d99b2c-138d-4470-9807-eec5191203a6" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.267350 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="37d99b2c-138d-4470-9807-eec5191203a6" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.268998 4760 util.go:30] "No sandbox for pod can be found. 
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.273694 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.273706 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.275033 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.275225 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.278764 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-2jq4r"]
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.362364 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5tfr\" (UniqueName: \"kubernetes.io/projected/0bad4d45-bcec-460e-b393-2c8841842af8-kube-api-access-s5tfr\") pod \"ssh-known-hosts-edpm-deployment-2jq4r\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.362418 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-2jq4r\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.362462 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-2jq4r\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.463946 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5tfr\" (UniqueName: \"kubernetes.io/projected/0bad4d45-bcec-460e-b393-2c8841842af8-kube-api-access-s5tfr\") pod \"ssh-known-hosts-edpm-deployment-2jq4r\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.464016 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-2jq4r\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.464065 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-2jq4r\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.471725 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-2jq4r\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.473863 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-2jq4r\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.489718 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5tfr\" (UniqueName: \"kubernetes.io/projected/0bad4d45-bcec-460e-b393-2c8841842af8-kube-api-access-s5tfr\") pod \"ssh-known-hosts-edpm-deployment-2jq4r\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r"
Nov 24 17:33:10 crc kubenswrapper[4760]: I1124 17:33:10.600796 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r"
Nov 24 17:33:11 crc kubenswrapper[4760]: I1124 17:33:11.107462 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-2jq4r"]
Nov 24 17:33:11 crc kubenswrapper[4760]: W1124 17:33:11.111397 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bad4d45_bcec_460e_b393_2c8841842af8.slice/crio-6cc65c9c295794b38e4a1622eb293538422df4dd7c9d45027915e5dc2d85d741 WatchSource:0}: Error finding container 6cc65c9c295794b38e4a1622eb293538422df4dd7c9d45027915e5dc2d85d741: Status 404 returned error can't find the container with id 6cc65c9c295794b38e4a1622eb293538422df4dd7c9d45027915e5dc2d85d741
Nov 24 17:33:11 crc kubenswrapper[4760]: I1124 17:33:11.179303 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r" event={"ID":"0bad4d45-bcec-460e-b393-2c8841842af8","Type":"ContainerStarted","Data":"6cc65c9c295794b38e4a1622eb293538422df4dd7c9d45027915e5dc2d85d741"}
Nov 24 17:33:12 crc kubenswrapper[4760]: I1124 17:33:12.187587 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r" event={"ID":"0bad4d45-bcec-460e-b393-2c8841842af8","Type":"ContainerStarted","Data":"0ef410f16e05d18e09b7739b61cbf33c877b5f09249bcac7eb4830cf758fdc5a"}
Nov 24 17:33:12 crc kubenswrapper[4760]: I1124 17:33:12.211678 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r" podStartSLOduration=1.757264143 podStartE2EDuration="2.211660472s" podCreationTimestamp="2025-11-24 17:33:10 +0000 UTC" firstStartedPulling="2025-11-24 17:33:11.113636528 +0000 UTC m=+1786.436518078" lastFinishedPulling="2025-11-24 17:33:11.568032867 +0000 UTC m=+1786.890914407" observedRunningTime="2025-11-24 17:33:12.206669199 +0000 UTC m=+1787.529550739" watchObservedRunningTime="2025-11-24 17:33:12.211660472 +0000 UTC m=+1787.534542022"
Nov 24 17:33:16 crc kubenswrapper[4760]: I1124 17:33:16.467294 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"
containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:33:16 crc kubenswrapper[4760]: E1124 17:33:16.468786 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:33:19 crc kubenswrapper[4760]: I1124 17:33:19.251525 4760 generic.go:334] "Generic (PLEG): container finished" podID="0bad4d45-bcec-460e-b393-2c8841842af8" containerID="0ef410f16e05d18e09b7739b61cbf33c877b5f09249bcac7eb4830cf758fdc5a" exitCode=0 Nov 24 17:33:19 crc kubenswrapper[4760]: I1124 17:33:19.251619 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r" event={"ID":"0bad4d45-bcec-460e-b393-2c8841842af8","Type":"ContainerDied","Data":"0ef410f16e05d18e09b7739b61cbf33c877b5f09249bcac7eb4830cf758fdc5a"} Nov 24 17:33:20 crc kubenswrapper[4760]: I1124 17:33:20.674609 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r" Nov 24 17:33:20 crc kubenswrapper[4760]: I1124 17:33:20.766771 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-inventory-0\") pod \"0bad4d45-bcec-460e-b393-2c8841842af8\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " Nov 24 17:33:20 crc kubenswrapper[4760]: I1124 17:33:20.767149 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-ssh-key-openstack-edpm-ipam\") pod \"0bad4d45-bcec-460e-b393-2c8841842af8\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " Nov 24 17:33:20 crc kubenswrapper[4760]: I1124 17:33:20.767344 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5tfr\" (UniqueName: \"kubernetes.io/projected/0bad4d45-bcec-460e-b393-2c8841842af8-kube-api-access-s5tfr\") pod \"0bad4d45-bcec-460e-b393-2c8841842af8\" (UID: \"0bad4d45-bcec-460e-b393-2c8841842af8\") " Nov 24 17:33:20 crc kubenswrapper[4760]: I1124 17:33:20.772225 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bad4d45-bcec-460e-b393-2c8841842af8-kube-api-access-s5tfr" (OuterVolumeSpecName: "kube-api-access-s5tfr") pod "0bad4d45-bcec-460e-b393-2c8841842af8" (UID: "0bad4d45-bcec-460e-b393-2c8841842af8"). InnerVolumeSpecName "kube-api-access-s5tfr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:33:20 crc kubenswrapper[4760]: I1124 17:33:20.793056 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "0bad4d45-bcec-460e-b393-2c8841842af8" (UID: "0bad4d45-bcec-460e-b393-2c8841842af8"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:33:20 crc kubenswrapper[4760]: I1124 17:33:20.795261 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "0bad4d45-bcec-460e-b393-2c8841842af8" (UID: "0bad4d45-bcec-460e-b393-2c8841842af8"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:33:20 crc kubenswrapper[4760]: I1124 17:33:20.869644 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5tfr\" (UniqueName: \"kubernetes.io/projected/0bad4d45-bcec-460e-b393-2c8841842af8-kube-api-access-s5tfr\") on node \"crc\" DevicePath \"\"" Nov 24 17:33:20 crc kubenswrapper[4760]: I1124 17:33:20.869687 4760 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:33:20 crc kubenswrapper[4760]: I1124 17:33:20.869701 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0bad4d45-bcec-460e-b393-2c8841842af8-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.270485 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r" event={"ID":"0bad4d45-bcec-460e-b393-2c8841842af8","Type":"ContainerDied","Data":"6cc65c9c295794b38e4a1622eb293538422df4dd7c9d45027915e5dc2d85d741"} Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.270543 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cc65c9c295794b38e4a1622eb293538422df4dd7c9d45027915e5dc2d85d741" Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.270552 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-2jq4r" Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.458620 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"] Nov 24 17:33:21 crc kubenswrapper[4760]: E1124 17:33:21.459100 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bad4d45-bcec-460e-b393-2c8841842af8" containerName="ssh-known-hosts-edpm-deployment" Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.459118 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bad4d45-bcec-460e-b393-2c8841842af8" containerName="ssh-known-hosts-edpm-deployment" Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.459352 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bad4d45-bcec-460e-b393-2c8841842af8" containerName="ssh-known-hosts-edpm-deployment" Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.460290 4760 util.go:30] "No sandbox for pod can be found. 
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.467924 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.468040 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.468083 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.471997 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.479978 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-7hvxr\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.480218 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mr8tg\" (UniqueName: \"kubernetes.io/projected/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-kube-api-access-mr8tg\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-7hvxr\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.480422 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-7hvxr\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.481923 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"]
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.583207 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-7hvxr\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.583360 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mr8tg\" (UniqueName: \"kubernetes.io/projected/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-kube-api-access-mr8tg\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-7hvxr\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.584065 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-7hvxr\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.588534 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-7hvxr\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.596636 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-7hvxr\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.599614 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mr8tg\" (UniqueName: \"kubernetes.io/projected/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-kube-api-access-mr8tg\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-7hvxr\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:21 crc kubenswrapper[4760]: I1124 17:33:21.787487 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:22 crc kubenswrapper[4760]: I1124 17:33:22.304191 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"]
Nov 24 17:33:23 crc kubenswrapper[4760]: I1124 17:33:23.289077 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr" event={"ID":"7f9d57fb-8bae-4055-aa31-d14b9cd38b62","Type":"ContainerStarted","Data":"a57efa59ca94061e95e6c1912dc2495e581398b055afda06d25b81be09ec7185"}
Nov 24 17:33:23 crc kubenswrapper[4760]: I1124 17:33:23.289517 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr" event={"ID":"7f9d57fb-8bae-4055-aa31-d14b9cd38b62","Type":"ContainerStarted","Data":"aa6e5c628d86b446542d49416dbb2f7e7c5249009d7810b8037107e4ae9117a7"}
Nov 24 17:33:23 crc kubenswrapper[4760]: I1124 17:33:23.313506 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr" podStartSLOduration=1.888497605 podStartE2EDuration="2.313480455s" podCreationTimestamp="2025-11-24 17:33:21 +0000 UTC" firstStartedPulling="2025-11-24 17:33:22.314032929 +0000 UTC m=+1797.636914489" lastFinishedPulling="2025-11-24 17:33:22.739015789 +0000 UTC m=+1798.061897339" observedRunningTime="2025-11-24 17:33:23.311202559 +0000 UTC m=+1798.634084179" watchObservedRunningTime="2025-11-24 17:33:23.313480455 +0000 UTC m=+1798.636362025"
Nov 24 17:33:28 crc kubenswrapper[4760]: I1124 17:33:28.466886 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5"
Nov 24 17:33:28 crc kubenswrapper[4760]: E1124 17:33:28.467801 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3"
Nov 24 17:33:31 crc kubenswrapper[4760]: I1124 17:33:31.359094 4760 generic.go:334] "Generic (PLEG): container finished" podID="7f9d57fb-8bae-4055-aa31-d14b9cd38b62" containerID="a57efa59ca94061e95e6c1912dc2495e581398b055afda06d25b81be09ec7185" exitCode=0
Nov 24 17:33:31 crc kubenswrapper[4760]: I1124 17:33:31.359450 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr" event={"ID":"7f9d57fb-8bae-4055-aa31-d14b9cd38b62","Type":"ContainerDied","Data":"a57efa59ca94061e95e6c1912dc2495e581398b055afda06d25b81be09ec7185"}
Nov 24 17:33:32 crc kubenswrapper[4760]: I1124 17:33:32.761231 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:32 crc kubenswrapper[4760]: I1124 17:33:32.927996 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-ssh-key\") pod \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") "
Nov 24 17:33:32 crc kubenswrapper[4760]: I1124 17:33:32.928212 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mr8tg\" (UniqueName: \"kubernetes.io/projected/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-kube-api-access-mr8tg\") pod \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") "
Nov 24 17:33:32 crc kubenswrapper[4760]: I1124 17:33:32.928305 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-inventory\") pod \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\" (UID: \"7f9d57fb-8bae-4055-aa31-d14b9cd38b62\") "
Nov 24 17:33:32 crc kubenswrapper[4760]: I1124 17:33:32.933025 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-kube-api-access-mr8tg" (OuterVolumeSpecName: "kube-api-access-mr8tg") pod "7f9d57fb-8bae-4055-aa31-d14b9cd38b62" (UID: "7f9d57fb-8bae-4055-aa31-d14b9cd38b62"). InnerVolumeSpecName "kube-api-access-mr8tg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:33:32 crc kubenswrapper[4760]: I1124 17:33:32.957489 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-inventory" (OuterVolumeSpecName: "inventory") pod "7f9d57fb-8bae-4055-aa31-d14b9cd38b62" (UID: "7f9d57fb-8bae-4055-aa31-d14b9cd38b62"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:33:32 crc kubenswrapper[4760]: I1124 17:33:32.957558 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7f9d57fb-8bae-4055-aa31-d14b9cd38b62" (UID: "7f9d57fb-8bae-4055-aa31-d14b9cd38b62"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.030471 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.030516 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mr8tg\" (UniqueName: \"kubernetes.io/projected/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-kube-api-access-mr8tg\") on node \"crc\" DevicePath \"\""
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.030536 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f9d57fb-8bae-4055-aa31-d14b9cd38b62-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.317534 4760 scope.go:117] "RemoveContainer" containerID="aea710e3b7d41f64eb2e4539c73e5baff1889513d0c522e32571b42dbe5f9ae8"
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.382130 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr" event={"ID":"7f9d57fb-8bae-4055-aa31-d14b9cd38b62","Type":"ContainerDied","Data":"aa6e5c628d86b446542d49416dbb2f7e7c5249009d7810b8037107e4ae9117a7"}
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.382202 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa6e5c628d86b446542d49416dbb2f7e7c5249009d7810b8037107e4ae9117a7"
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.382150 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-7hvxr"
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.447031 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5"]
Nov 24 17:33:33 crc kubenswrapper[4760]: E1124 17:33:33.447474 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f9d57fb-8bae-4055-aa31-d14b9cd38b62" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.447499 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f9d57fb-8bae-4055-aa31-d14b9cd38b62" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.447748 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f9d57fb-8bae-4055-aa31-d14b9cd38b62" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.448518 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5"
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.454380 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.454895 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.455062 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.455257 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.455286 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5"] Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.539615 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.539709 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9j8w9\" (UniqueName: \"kubernetes.io/projected/7010932f-cdb9-47d9-8674-07778eda876d-kube-api-access-9j8w9\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.540177 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.641962 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.642049 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.642102 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9j8w9\" (UniqueName: \"kubernetes.io/projected/7010932f-cdb9-47d9-8674-07778eda876d-kube-api-access-9j8w9\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5\" (UID: 
\"7010932f-cdb9-47d9-8674-07778eda876d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.646503 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.646564 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.664583 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9j8w9\" (UniqueName: \"kubernetes.io/projected/7010932f-cdb9-47d9-8674-07778eda876d-kube-api-access-9j8w9\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:33 crc kubenswrapper[4760]: I1124 17:33:33.769725 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:34 crc kubenswrapper[4760]: I1124 17:33:34.358178 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5"] Nov 24 17:33:34 crc kubenswrapper[4760]: I1124 17:33:34.395808 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" event={"ID":"7010932f-cdb9-47d9-8674-07778eda876d","Type":"ContainerStarted","Data":"bb16a3302c563c79b053588be418cedf44eada31c4cd5344bb80b894cfc0ba5d"} Nov 24 17:33:35 crc kubenswrapper[4760]: I1124 17:33:35.423715 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" event={"ID":"7010932f-cdb9-47d9-8674-07778eda876d","Type":"ContainerStarted","Data":"4315f86bd4afc898dfd897970acdd7ccfe0e307cbd2dd63a81f9608263637bde"} Nov 24 17:33:42 crc kubenswrapper[4760]: I1124 17:33:42.466639 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:33:42 crc kubenswrapper[4760]: E1124 17:33:42.468055 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:33:44 crc kubenswrapper[4760]: I1124 17:33:44.502070 4760 generic.go:334] "Generic (PLEG): container finished" podID="7010932f-cdb9-47d9-8674-07778eda876d" containerID="4315f86bd4afc898dfd897970acdd7ccfe0e307cbd2dd63a81f9608263637bde" exitCode=0 Nov 24 17:33:44 crc kubenswrapper[4760]: I1124 17:33:44.502155 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" event={"ID":"7010932f-cdb9-47d9-8674-07778eda876d","Type":"ContainerDied","Data":"4315f86bd4afc898dfd897970acdd7ccfe0e307cbd2dd63a81f9608263637bde"} Nov 24 17:33:45 crc kubenswrapper[4760]: I1124 17:33:45.958966 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.023217 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-ssh-key\") pod \"7010932f-cdb9-47d9-8674-07778eda876d\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.023370 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-inventory\") pod \"7010932f-cdb9-47d9-8674-07778eda876d\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.023403 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9j8w9\" (UniqueName: \"kubernetes.io/projected/7010932f-cdb9-47d9-8674-07778eda876d-kube-api-access-9j8w9\") pod \"7010932f-cdb9-47d9-8674-07778eda876d\" (UID: \"7010932f-cdb9-47d9-8674-07778eda876d\") " Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.033202 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7010932f-cdb9-47d9-8674-07778eda876d-kube-api-access-9j8w9" (OuterVolumeSpecName: "kube-api-access-9j8w9") pod "7010932f-cdb9-47d9-8674-07778eda876d" (UID: "7010932f-cdb9-47d9-8674-07778eda876d"). InnerVolumeSpecName "kube-api-access-9j8w9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.055530 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7010932f-cdb9-47d9-8674-07778eda876d" (UID: "7010932f-cdb9-47d9-8674-07778eda876d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.066076 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-inventory" (OuterVolumeSpecName: "inventory") pod "7010932f-cdb9-47d9-8674-07778eda876d" (UID: "7010932f-cdb9-47d9-8674-07778eda876d"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.125315 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.125352 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7010932f-cdb9-47d9-8674-07778eda876d-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.125362 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9j8w9\" (UniqueName: \"kubernetes.io/projected/7010932f-cdb9-47d9-8674-07778eda876d-kube-api-access-9j8w9\") on node \"crc\" DevicePath \"\"" Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.523298 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" event={"ID":"7010932f-cdb9-47d9-8674-07778eda876d","Type":"ContainerDied","Data":"bb16a3302c563c79b053588be418cedf44eada31c4cd5344bb80b894cfc0ba5d"} Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.523351 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb16a3302c563c79b053588be418cedf44eada31c4cd5344bb80b894cfc0ba5d" Nov 24 17:33:46 crc kubenswrapper[4760]: I1124 17:33:46.523407 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.070815 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh"] Nov 24 17:33:47 crc kubenswrapper[4760]: E1124 17:33:47.071201 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7010932f-cdb9-47d9-8674-07778eda876d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.071215 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="7010932f-cdb9-47d9-8674-07778eda876d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.071395 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="7010932f-cdb9-47d9-8674-07778eda876d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.072012 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.078661 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.078787 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.078928 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.079050 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.079088 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.079127 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.079196 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.079267 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.085936 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh"] Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149022 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149090 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149130 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149290 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149342 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149408 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149484 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149582 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149663 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149721 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149768 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" 
(UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149808 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149834 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.149858 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thqt8\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-kube-api-access-thqt8\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.251796 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252251 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252298 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252325 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252407 4760 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252458 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252498 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252532 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252813 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252852 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252878 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252903 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: 
\"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.252926 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thqt8\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-kube-api-access-thqt8\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.253016 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.256127 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.256703 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.256791 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.256977 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.257536 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.258051 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.258602 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.258718 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.260021 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.260481 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.261873 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.263879 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.268900 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 
24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.273319 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thqt8\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-kube-api-access-thqt8\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.394644 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:33:47 crc kubenswrapper[4760]: I1124 17:33:47.943593 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh"] Nov 24 17:33:48 crc kubenswrapper[4760]: I1124 17:33:48.544412 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" event={"ID":"2522087b-33dd-418b-abb2-813ca0f5a051","Type":"ContainerStarted","Data":"ddf5783fd009497522785e84c60681bcb5bd77f7b580074e64e4e4992e137fb4"} Nov 24 17:33:49 crc kubenswrapper[4760]: I1124 17:33:49.560477 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" event={"ID":"2522087b-33dd-418b-abb2-813ca0f5a051","Type":"ContainerStarted","Data":"5fa76e9a21afbdb38620357412fe8374dd7c31fd88254be8ee862b7da08b1145"} Nov 24 17:33:49 crc kubenswrapper[4760]: I1124 17:33:49.588445 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" podStartSLOduration=2.166397009 podStartE2EDuration="2.588421514s" podCreationTimestamp="2025-11-24 17:33:47 +0000 UTC" firstStartedPulling="2025-11-24 17:33:47.943591933 +0000 UTC m=+1823.266473493" lastFinishedPulling="2025-11-24 17:33:48.365616448 +0000 UTC m=+1823.688497998" observedRunningTime="2025-11-24 17:33:49.579145718 +0000 UTC m=+1824.902027288" watchObservedRunningTime="2025-11-24 17:33:49.588421514 +0000 UTC m=+1824.911303074" Nov 24 17:33:54 crc kubenswrapper[4760]: I1124 17:33:54.466553 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:33:54 crc kubenswrapper[4760]: E1124 17:33:54.468391 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:34:09 crc kubenswrapper[4760]: I1124 17:34:09.466558 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:34:09 crc kubenswrapper[4760]: E1124 17:34:09.467468 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:34:20 crc 
kubenswrapper[4760]: I1124 17:34:20.465992 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:34:20 crc kubenswrapper[4760]: E1124 17:34:20.466909 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:34:25 crc kubenswrapper[4760]: I1124 17:34:25.897158 4760 generic.go:334] "Generic (PLEG): container finished" podID="2522087b-33dd-418b-abb2-813ca0f5a051" containerID="5fa76e9a21afbdb38620357412fe8374dd7c31fd88254be8ee862b7da08b1145" exitCode=0 Nov 24 17:34:25 crc kubenswrapper[4760]: I1124 17:34:25.897255 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" event={"ID":"2522087b-33dd-418b-abb2-813ca0f5a051","Type":"ContainerDied","Data":"5fa76e9a21afbdb38620357412fe8374dd7c31fd88254be8ee862b7da08b1145"} Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.328976 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.417488 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.417594 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-repo-setup-combined-ca-bundle\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.417625 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thqt8\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-kube-api-access-thqt8\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.417674 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ovn-combined-ca-bundle\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.417705 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-nova-combined-ca-bundle\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.417777 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-neutron-metadata-combined-ca-bundle\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.417800 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-bootstrap-combined-ca-bundle\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.417885 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.418761 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.419631 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-libvirt-combined-ca-bundle\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.419801 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-telemetry-combined-ca-bundle\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.419933 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ssh-key\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.419976 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-ovn-default-certs-0\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.420108 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-inventory\") pod \"2522087b-33dd-418b-abb2-813ca0f5a051\" (UID: \"2522087b-33dd-418b-abb2-813ca0f5a051\") " Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.425322 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-kube-api-access-thqt8" (OuterVolumeSpecName: "kube-api-access-thqt8") pod 
"2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "kube-api-access-thqt8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.425340 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.425980 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.426206 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.427039 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.428223 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.428586 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.428705 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.429041 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.429533 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.430192 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.430713 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.454240 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-inventory" (OuterVolumeSpecName: "inventory") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.456019 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2522087b-33dd-418b-abb2-813ca0f5a051" (UID: "2522087b-33dd-418b-abb2-813ca0f5a051"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521411 4760 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521449 4760 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521459 4760 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521468 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521478 4760 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521486 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521496 4760 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521507 4760 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521517 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thqt8\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-kube-api-access-thqt8\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521526 4760 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521534 4760 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521543 4760 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-neutron-metadata-combined-ca-bundle\") on node 
\"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521552 4760 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2522087b-33dd-418b-abb2-813ca0f5a051-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.521561 4760 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/2522087b-33dd-418b-abb2-813ca0f5a051-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.919106 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" event={"ID":"2522087b-33dd-418b-abb2-813ca0f5a051","Type":"ContainerDied","Data":"ddf5783fd009497522785e84c60681bcb5bd77f7b580074e64e4e4992e137fb4"} Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.919146 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddf5783fd009497522785e84c60681bcb5bd77f7b580074e64e4e4992e137fb4" Nov 24 17:34:27 crc kubenswrapper[4760]: I1124 17:34:27.919256 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.025304 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp"] Nov 24 17:34:28 crc kubenswrapper[4760]: E1124 17:34:28.025794 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2522087b-33dd-418b-abb2-813ca0f5a051" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.025813 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="2522087b-33dd-418b-abb2-813ca0f5a051" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.026075 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="2522087b-33dd-418b-abb2-813ca0f5a051" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.026776 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.028584 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqdxb\" (UniqueName: \"kubernetes.io/projected/5e897692-730b-402f-a1a7-5f242a36fe2b-kube-api-access-kqdxb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.028676 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.028714 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.028751 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.028997 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5e897692-730b-402f-a1a7-5f242a36fe2b-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.038407 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp"] Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.044542 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.044896 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.045023 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.045223 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.045291 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.130940 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.131246 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5e897692-730b-402f-a1a7-5f242a36fe2b-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.131356 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqdxb\" (UniqueName: \"kubernetes.io/projected/5e897692-730b-402f-a1a7-5f242a36fe2b-kube-api-access-kqdxb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.131467 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.131540 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.132108 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5e897692-730b-402f-a1a7-5f242a36fe2b-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.137043 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.137650 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.145576 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.150901 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqdxb\" (UniqueName: \"kubernetes.io/projected/5e897692-730b-402f-a1a7-5f242a36fe2b-kube-api-access-kqdxb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vb5mp\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.356594 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:34:28 crc kubenswrapper[4760]: I1124 17:34:28.922047 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp"] Nov 24 17:34:29 crc kubenswrapper[4760]: I1124 17:34:29.935844 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" event={"ID":"5e897692-730b-402f-a1a7-5f242a36fe2b","Type":"ContainerStarted","Data":"b1707d79aab87b4fe9d903bedf50198493c6cbf979978c0b278fce960fa358e8"} Nov 24 17:34:30 crc kubenswrapper[4760]: I1124 17:34:30.947479 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" event={"ID":"5e897692-730b-402f-a1a7-5f242a36fe2b","Type":"ContainerStarted","Data":"6306ffdd7c63206c0c1b74c843efcc92671c53aba7f3536a4ad7bf04823013bd"} Nov 24 17:34:30 crc kubenswrapper[4760]: I1124 17:34:30.970411 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" podStartSLOduration=2.136997132 podStartE2EDuration="2.970392746s" podCreationTimestamp="2025-11-24 17:34:28 +0000 UTC" firstStartedPulling="2025-11-24 17:34:28.924266145 +0000 UTC m=+1864.247147705" lastFinishedPulling="2025-11-24 17:34:29.757661769 +0000 UTC m=+1865.080543319" observedRunningTime="2025-11-24 17:34:30.968899183 +0000 UTC m=+1866.291780733" watchObservedRunningTime="2025-11-24 17:34:30.970392746 +0000 UTC m=+1866.293274296" Nov 24 17:34:32 crc kubenswrapper[4760]: I1124 17:34:32.466675 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:34:32 crc kubenswrapper[4760]: E1124 17:34:32.467223 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:34:47 crc kubenswrapper[4760]: I1124 17:34:47.468347 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:34:48 crc kubenswrapper[4760]: I1124 17:34:48.116252 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"34dd14c1d8b399108c0785894d67dba23d127edf9003f77e4b6eaa1ff928de77"} Nov 24 17:35:32 crc kubenswrapper[4760]: I1124 17:35:32.744142 4760 generic.go:334] "Generic (PLEG): container finished" podID="5e897692-730b-402f-a1a7-5f242a36fe2b" 
containerID="6306ffdd7c63206c0c1b74c843efcc92671c53aba7f3536a4ad7bf04823013bd" exitCode=0 Nov 24 17:35:32 crc kubenswrapper[4760]: I1124 17:35:32.744248 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" event={"ID":"5e897692-730b-402f-a1a7-5f242a36fe2b","Type":"ContainerDied","Data":"6306ffdd7c63206c0c1b74c843efcc92671c53aba7f3536a4ad7bf04823013bd"} Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.173710 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.330417 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-inventory\") pod \"5e897692-730b-402f-a1a7-5f242a36fe2b\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.330549 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ovn-combined-ca-bundle\") pod \"5e897692-730b-402f-a1a7-5f242a36fe2b\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.330581 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5e897692-730b-402f-a1a7-5f242a36fe2b-ovncontroller-config-0\") pod \"5e897692-730b-402f-a1a7-5f242a36fe2b\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.330618 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ssh-key\") pod \"5e897692-730b-402f-a1a7-5f242a36fe2b\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.330644 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqdxb\" (UniqueName: \"kubernetes.io/projected/5e897692-730b-402f-a1a7-5f242a36fe2b-kube-api-access-kqdxb\") pod \"5e897692-730b-402f-a1a7-5f242a36fe2b\" (UID: \"5e897692-730b-402f-a1a7-5f242a36fe2b\") " Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.336936 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e897692-730b-402f-a1a7-5f242a36fe2b-kube-api-access-kqdxb" (OuterVolumeSpecName: "kube-api-access-kqdxb") pod "5e897692-730b-402f-a1a7-5f242a36fe2b" (UID: "5e897692-730b-402f-a1a7-5f242a36fe2b"). InnerVolumeSpecName "kube-api-access-kqdxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.337446 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "5e897692-730b-402f-a1a7-5f242a36fe2b" (UID: "5e897692-730b-402f-a1a7-5f242a36fe2b"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.358209 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-inventory" (OuterVolumeSpecName: "inventory") pod "5e897692-730b-402f-a1a7-5f242a36fe2b" (UID: "5e897692-730b-402f-a1a7-5f242a36fe2b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.361070 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5e897692-730b-402f-a1a7-5f242a36fe2b" (UID: "5e897692-730b-402f-a1a7-5f242a36fe2b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.364408 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e897692-730b-402f-a1a7-5f242a36fe2b-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "5e897692-730b-402f-a1a7-5f242a36fe2b" (UID: "5e897692-730b-402f-a1a7-5f242a36fe2b"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.432827 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.432860 4760 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.432872 4760 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5e897692-730b-402f-a1a7-5f242a36fe2b-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.432881 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5e897692-730b-402f-a1a7-5f242a36fe2b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.432891 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqdxb\" (UniqueName: \"kubernetes.io/projected/5e897692-730b-402f-a1a7-5f242a36fe2b-kube-api-access-kqdxb\") on node \"crc\" DevicePath \"\"" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.764438 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" event={"ID":"5e897692-730b-402f-a1a7-5f242a36fe2b","Type":"ContainerDied","Data":"b1707d79aab87b4fe9d903bedf50198493c6cbf979978c0b278fce960fa358e8"} Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.764766 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1707d79aab87b4fe9d903bedf50198493c6cbf979978c0b278fce960fa358e8" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.764513 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vb5mp" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.866124 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7"] Nov 24 17:35:34 crc kubenswrapper[4760]: E1124 17:35:34.866506 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e897692-730b-402f-a1a7-5f242a36fe2b" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.866521 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e897692-730b-402f-a1a7-5f242a36fe2b" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.866929 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e897692-730b-402f-a1a7-5f242a36fe2b" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.867555 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.871179 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.871254 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.871410 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.871439 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.871476 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.871515 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 24 17:35:34 crc kubenswrapper[4760]: I1124 17:35:34.889337 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7"] Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.043868 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.043983 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.044037 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.044058 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.044114 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.044386 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjgq9\" (UniqueName: \"kubernetes.io/projected/e88df757-ac39-4a30-b0aa-eb820708e3b4-kube-api-access-zjgq9\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.146240 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.146349 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.146395 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.146425 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: 
\"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.146496 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.146559 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjgq9\" (UniqueName: \"kubernetes.io/projected/e88df757-ac39-4a30-b0aa-eb820708e3b4-kube-api-access-zjgq9\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.151220 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.151280 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.152585 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.153141 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.155687 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.169740 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjgq9\" 
(UniqueName: \"kubernetes.io/projected/e88df757-ac39-4a30-b0aa-eb820708e3b4-kube-api-access-zjgq9\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.186956 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.686686 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7"] Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.698905 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:35:35 crc kubenswrapper[4760]: I1124 17:35:35.776207 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" event={"ID":"e88df757-ac39-4a30-b0aa-eb820708e3b4","Type":"ContainerStarted","Data":"b2c900a7f72361f140501f20e00f7b5c0f188c70d7a893605699c460a50d0b14"} Nov 24 17:35:36 crc kubenswrapper[4760]: I1124 17:35:36.793767 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" event={"ID":"e88df757-ac39-4a30-b0aa-eb820708e3b4","Type":"ContainerStarted","Data":"30344f790e113106ef838c6c606ec73d0787635ae5c656071b24db8d4c6ee316"} Nov 24 17:35:36 crc kubenswrapper[4760]: I1124 17:35:36.815619 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" podStartSLOduration=2.350431675 podStartE2EDuration="2.815596784s" podCreationTimestamp="2025-11-24 17:35:34 +0000 UTC" firstStartedPulling="2025-11-24 17:35:35.698668377 +0000 UTC m=+1931.021549927" lastFinishedPulling="2025-11-24 17:35:36.163833466 +0000 UTC m=+1931.486715036" observedRunningTime="2025-11-24 17:35:36.810266661 +0000 UTC m=+1932.133148221" watchObservedRunningTime="2025-11-24 17:35:36.815596784 +0000 UTC m=+1932.138478344" Nov 24 17:36:22 crc kubenswrapper[4760]: I1124 17:36:22.187916 4760 generic.go:334] "Generic (PLEG): container finished" podID="e88df757-ac39-4a30-b0aa-eb820708e3b4" containerID="30344f790e113106ef838c6c606ec73d0787635ae5c656071b24db8d4c6ee316" exitCode=0 Nov 24 17:36:22 crc kubenswrapper[4760]: I1124 17:36:22.188036 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" event={"ID":"e88df757-ac39-4a30-b0aa-eb820708e3b4","Type":"ContainerDied","Data":"30344f790e113106ef838c6c606ec73d0787635ae5c656071b24db8d4c6ee316"} Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.614456 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.770431 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-nova-metadata-neutron-config-0\") pod \"e88df757-ac39-4a30-b0aa-eb820708e3b4\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.770531 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-metadata-combined-ca-bundle\") pod \"e88df757-ac39-4a30-b0aa-eb820708e3b4\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.770657 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjgq9\" (UniqueName: \"kubernetes.io/projected/e88df757-ac39-4a30-b0aa-eb820708e3b4-kube-api-access-zjgq9\") pod \"e88df757-ac39-4a30-b0aa-eb820708e3b4\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.770700 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-ssh-key\") pod \"e88df757-ac39-4a30-b0aa-eb820708e3b4\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.770730 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-inventory\") pod \"e88df757-ac39-4a30-b0aa-eb820708e3b4\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.770751 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"e88df757-ac39-4a30-b0aa-eb820708e3b4\" (UID: \"e88df757-ac39-4a30-b0aa-eb820708e3b4\") " Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.776739 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e88df757-ac39-4a30-b0aa-eb820708e3b4-kube-api-access-zjgq9" (OuterVolumeSpecName: "kube-api-access-zjgq9") pod "e88df757-ac39-4a30-b0aa-eb820708e3b4" (UID: "e88df757-ac39-4a30-b0aa-eb820708e3b4"). InnerVolumeSpecName "kube-api-access-zjgq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.784430 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "e88df757-ac39-4a30-b0aa-eb820708e3b4" (UID: "e88df757-ac39-4a30-b0aa-eb820708e3b4"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.797976 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-inventory" (OuterVolumeSpecName: "inventory") pod "e88df757-ac39-4a30-b0aa-eb820708e3b4" (UID: "e88df757-ac39-4a30-b0aa-eb820708e3b4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.801851 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "e88df757-ac39-4a30-b0aa-eb820708e3b4" (UID: "e88df757-ac39-4a30-b0aa-eb820708e3b4"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.803447 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "e88df757-ac39-4a30-b0aa-eb820708e3b4" (UID: "e88df757-ac39-4a30-b0aa-eb820708e3b4"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.805966 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e88df757-ac39-4a30-b0aa-eb820708e3b4" (UID: "e88df757-ac39-4a30-b0aa-eb820708e3b4"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.872655 4760 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.872694 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjgq9\" (UniqueName: \"kubernetes.io/projected/e88df757-ac39-4a30-b0aa-eb820708e3b4-kube-api-access-zjgq9\") on node \"crc\" DevicePath \"\"" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.872706 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.872715 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.872726 4760 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:36:23 crc kubenswrapper[4760]: I1124 17:36:23.872736 4760 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e88df757-ac39-4a30-b0aa-eb820708e3b4-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.205864 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" event={"ID":"e88df757-ac39-4a30-b0aa-eb820708e3b4","Type":"ContainerDied","Data":"b2c900a7f72361f140501f20e00f7b5c0f188c70d7a893605699c460a50d0b14"} Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.206191 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2c900a7f72361f140501f20e00f7b5c0f188c70d7a893605699c460a50d0b14" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.205925 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.371474 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l"] Nov 24 17:36:24 crc kubenswrapper[4760]: E1124 17:36:24.371907 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e88df757-ac39-4a30-b0aa-eb820708e3b4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.371923 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="e88df757-ac39-4a30-b0aa-eb820708e3b4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.372173 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="e88df757-ac39-4a30-b0aa-eb820708e3b4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.372822 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.375304 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.375356 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.376529 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.376587 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.376608 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.383571 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l"] Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.485125 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.485501 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.485626 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.485680 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.485741 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nd647\" (UniqueName: \"kubernetes.io/projected/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-kube-api-access-nd647\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.591954 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-nd647\" (UniqueName: \"kubernetes.io/projected/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-kube-api-access-nd647\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.592072 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.592165 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.592188 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.592209 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.596303 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.596463 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.597326 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.601431 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-inventory\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.607832 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nd647\" (UniqueName: \"kubernetes.io/projected/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-kube-api-access-nd647\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:24 crc kubenswrapper[4760]: I1124 17:36:24.690051 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:36:25 crc kubenswrapper[4760]: I1124 17:36:25.345876 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l"] Nov 24 17:36:26 crc kubenswrapper[4760]: I1124 17:36:26.227259 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" event={"ID":"d89f2f80-b7b0-49b2-beab-c4fd2d17352f","Type":"ContainerStarted","Data":"05bc87a4d0ad6c526cca12310ee600bc658ebb8f9f494bffb5988e3246bb4d44"} Nov 24 17:36:27 crc kubenswrapper[4760]: I1124 17:36:27.392923 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:36:28 crc kubenswrapper[4760]: I1124 17:36:28.249388 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" event={"ID":"d89f2f80-b7b0-49b2-beab-c4fd2d17352f","Type":"ContainerStarted","Data":"5a309a4cdf05e4bb9075c56f5e8b42ef9caa065374f7cb3d90aebe73670e9338"} Nov 24 17:36:28 crc kubenswrapper[4760]: I1124 17:36:28.270550 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" podStartSLOduration=2.231430932 podStartE2EDuration="4.270533553s" podCreationTimestamp="2025-11-24 17:36:24 +0000 UTC" firstStartedPulling="2025-11-24 17:36:25.351656821 +0000 UTC m=+1980.674538381" lastFinishedPulling="2025-11-24 17:36:27.390759452 +0000 UTC m=+1982.713641002" observedRunningTime="2025-11-24 17:36:28.263845662 +0000 UTC m=+1983.586727212" watchObservedRunningTime="2025-11-24 17:36:28.270533553 +0000 UTC m=+1983.593415103" Nov 24 17:37:05 crc kubenswrapper[4760]: I1124 17:37:05.642575 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:37:05 crc kubenswrapper[4760]: I1124 17:37:05.643161 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:37:35 crc kubenswrapper[4760]: I1124 17:37:35.642280 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 24 17:37:35 crc kubenswrapper[4760]: I1124 17:37:35.642926 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.164747 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mbd44"] Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.167656 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.184847 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbd44"] Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.262713 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-utilities\") pod \"redhat-marketplace-mbd44\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.262875 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kktcz\" (UniqueName: \"kubernetes.io/projected/843677c0-77bb-46ee-9a98-78da63f20a62-kube-api-access-kktcz\") pod \"redhat-marketplace-mbd44\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.262955 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-catalog-content\") pod \"redhat-marketplace-mbd44\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.364816 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kktcz\" (UniqueName: \"kubernetes.io/projected/843677c0-77bb-46ee-9a98-78da63f20a62-kube-api-access-kktcz\") pod \"redhat-marketplace-mbd44\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.365257 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-catalog-content\") pod \"redhat-marketplace-mbd44\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.365461 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-utilities\") pod \"redhat-marketplace-mbd44\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.365751 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-catalog-content\") pod \"redhat-marketplace-mbd44\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.366023 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-utilities\") pod \"redhat-marketplace-mbd44\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.390189 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kktcz\" (UniqueName: \"kubernetes.io/projected/843677c0-77bb-46ee-9a98-78da63f20a62-kube-api-access-kktcz\") pod \"redhat-marketplace-mbd44\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.490449 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:37:58 crc kubenswrapper[4760]: I1124 17:37:58.969272 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbd44"] Nov 24 17:37:59 crc kubenswrapper[4760]: I1124 17:37:59.068952 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbd44" event={"ID":"843677c0-77bb-46ee-9a98-78da63f20a62","Type":"ContainerStarted","Data":"9d0b0f437221517c5c8bd18dd52af93aaaf027911eb82108885f3a36d97c0b00"} Nov 24 17:38:00 crc kubenswrapper[4760]: I1124 17:38:00.078691 4760 generic.go:334] "Generic (PLEG): container finished" podID="843677c0-77bb-46ee-9a98-78da63f20a62" containerID="a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20" exitCode=0 Nov 24 17:38:00 crc kubenswrapper[4760]: I1124 17:38:00.078758 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbd44" event={"ID":"843677c0-77bb-46ee-9a98-78da63f20a62","Type":"ContainerDied","Data":"a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20"} Nov 24 17:38:02 crc kubenswrapper[4760]: I1124 17:38:02.099796 4760 generic.go:334] "Generic (PLEG): container finished" podID="843677c0-77bb-46ee-9a98-78da63f20a62" containerID="cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6" exitCode=0 Nov 24 17:38:02 crc kubenswrapper[4760]: I1124 17:38:02.099940 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbd44" event={"ID":"843677c0-77bb-46ee-9a98-78da63f20a62","Type":"ContainerDied","Data":"cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6"} Nov 24 17:38:03 crc kubenswrapper[4760]: I1124 17:38:03.113985 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbd44" event={"ID":"843677c0-77bb-46ee-9a98-78da63f20a62","Type":"ContainerStarted","Data":"ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413"} Nov 24 17:38:03 crc kubenswrapper[4760]: I1124 17:38:03.156960 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mbd44" podStartSLOduration=2.665858182 podStartE2EDuration="5.156934933s" podCreationTimestamp="2025-11-24 17:37:58 +0000 UTC" firstStartedPulling="2025-11-24 17:38:00.08069895 +0000 UTC m=+2075.403580500" 
lastFinishedPulling="2025-11-24 17:38:02.571775701 +0000 UTC m=+2077.894657251" observedRunningTime="2025-11-24 17:38:03.147975777 +0000 UTC m=+2078.470857337" watchObservedRunningTime="2025-11-24 17:38:03.156934933 +0000 UTC m=+2078.479816493" Nov 24 17:38:05 crc kubenswrapper[4760]: I1124 17:38:05.642267 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:38:05 crc kubenswrapper[4760]: I1124 17:38:05.642655 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:38:05 crc kubenswrapper[4760]: I1124 17:38:05.642706 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:38:05 crc kubenswrapper[4760]: I1124 17:38:05.643543 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"34dd14c1d8b399108c0785894d67dba23d127edf9003f77e4b6eaa1ff928de77"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:38:05 crc kubenswrapper[4760]: I1124 17:38:05.643609 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://34dd14c1d8b399108c0785894d67dba23d127edf9003f77e4b6eaa1ff928de77" gracePeriod=600 Nov 24 17:38:06 crc kubenswrapper[4760]: I1124 17:38:06.140305 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="34dd14c1d8b399108c0785894d67dba23d127edf9003f77e4b6eaa1ff928de77" exitCode=0 Nov 24 17:38:06 crc kubenswrapper[4760]: I1124 17:38:06.140391 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"34dd14c1d8b399108c0785894d67dba23d127edf9003f77e4b6eaa1ff928de77"} Nov 24 17:38:06 crc kubenswrapper[4760]: I1124 17:38:06.141038 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2"} Nov 24 17:38:06 crc kubenswrapper[4760]: I1124 17:38:06.141148 4760 scope.go:117] "RemoveContainer" containerID="bdd868afe44a1cd3013debb0636d0d33a32fd51b8b4f85d33b6f94179fd53ff5" Nov 24 17:38:08 crc kubenswrapper[4760]: I1124 17:38:08.491361 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:38:08 crc kubenswrapper[4760]: I1124 17:38:08.492446 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:38:08 crc kubenswrapper[4760]: 
Nov 24 17:38:09 crc kubenswrapper[4760]: I1124 17:38:09.226556 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:38:09 crc kubenswrapper[4760]: I1124 17:38:09.274635 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbd44"] Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.186426 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mbd44" podUID="843677c0-77bb-46ee-9a98-78da63f20a62" containerName="registry-server" containerID="cri-o://ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413" gracePeriod=2 Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.677046 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.743170 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-catalog-content\") pod \"843677c0-77bb-46ee-9a98-78da63f20a62\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.743271 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-utilities\") pod \"843677c0-77bb-46ee-9a98-78da63f20a62\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.743406 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kktcz\" (UniqueName: \"kubernetes.io/projected/843677c0-77bb-46ee-9a98-78da63f20a62-kube-api-access-kktcz\") pod \"843677c0-77bb-46ee-9a98-78da63f20a62\" (UID: \"843677c0-77bb-46ee-9a98-78da63f20a62\") " Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.744987 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-utilities" (OuterVolumeSpecName: "utilities") pod "843677c0-77bb-46ee-9a98-78da63f20a62" (UID: "843677c0-77bb-46ee-9a98-78da63f20a62"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.753856 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/843677c0-77bb-46ee-9a98-78da63f20a62-kube-api-access-kktcz" (OuterVolumeSpecName: "kube-api-access-kktcz") pod "843677c0-77bb-46ee-9a98-78da63f20a62" (UID: "843677c0-77bb-46ee-9a98-78da63f20a62"). InnerVolumeSpecName "kube-api-access-kktcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.763229 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "843677c0-77bb-46ee-9a98-78da63f20a62" (UID: "843677c0-77bb-46ee-9a98-78da63f20a62"). InnerVolumeSpecName "catalog-content".
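PluginName "kubernetes.io/empty-dir", VolumeGidValue ""

Every catalog pod in this capture carries the same three volumes that the records above mount and tear down: two pod-scoped emptyDir volumes (utilities and catalog-content) and an API-server-injected projected token volume (kube-api-access-*). A sketch of how that emptyDir pair would be declared with the k8s.io/api/core/v1 types, reusing the names from the log; the projected token volume is injected automatically and is noted only in a comment:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    // The two emptyDir volumes the marketplace pods above mount. The
    // kube-api-access-* projected volume (token + CA + namespace) is added
    // by the API server and is not declared in the pod spec itself.
    func main() {
        volumes := []corev1.Volume{
            {Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
            {Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
        }
        for _, v := range volumes {
            fmt.Printf("volume %q (emptyDir, lives only as long as the pod)\n", v.Name)
        }
    }

Because both are emptyDir volumes, their contents exist only for the life of the pod, which is why the UnmountVolume/TearDown records above are followed shortly by "Cleaned up orphaned pod volumes dir" once the pod object is removed.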
Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.845420 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kktcz\" (UniqueName: \"kubernetes.io/projected/843677c0-77bb-46ee-9a98-78da63f20a62-kube-api-access-kktcz\") on node \"crc\" DevicePath \"\"" Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.845460 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:38:11 crc kubenswrapper[4760]: I1124 17:38:11.845470 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/843677c0-77bb-46ee-9a98-78da63f20a62-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.196267 4760 generic.go:334] "Generic (PLEG): container finished" podID="843677c0-77bb-46ee-9a98-78da63f20a62" containerID="ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413" exitCode=0 Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.196323 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mbd44" Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.196343 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbd44" event={"ID":"843677c0-77bb-46ee-9a98-78da63f20a62","Type":"ContainerDied","Data":"ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413"} Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.196371 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbd44" event={"ID":"843677c0-77bb-46ee-9a98-78da63f20a62","Type":"ContainerDied","Data":"9d0b0f437221517c5c8bd18dd52af93aaaf027911eb82108885f3a36d97c0b00"} Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.196389 4760 scope.go:117] "RemoveContainer" containerID="ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413" Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.215081 4760 scope.go:117] "RemoveContainer" containerID="cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6" Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.246030 4760 scope.go:117] "RemoveContainer" containerID="a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20" Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.247395 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbd44"] Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.258773 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbd44"] Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.289048 4760 scope.go:117] "RemoveContainer" containerID="ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413" Nov 24 17:38:12 crc kubenswrapper[4760]: E1124 17:38:12.289534 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413\": container with ID starting with ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413 not found: ID does not exist" containerID="ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413" Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.289563 4760
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413"} err="failed to get container status \"ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413\": rpc error: code = NotFound desc = could not find container \"ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413\": container with ID starting with ad8d06894f303d2643ffbcb3d0981bd03aca350255d2d8ef8ee2eb3f653e1413 not found: ID does not exist" Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.289586 4760 scope.go:117] "RemoveContainer" containerID="cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6" Nov 24 17:38:12 crc kubenswrapper[4760]: E1124 17:38:12.289926 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6\": container with ID starting with cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6 not found: ID does not exist" containerID="cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6" Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.289967 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6"} err="failed to get container status \"cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6\": rpc error: code = NotFound desc = could not find container \"cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6\": container with ID starting with cdc42ff8e7b28419e3e47ffdf26e2b7eefc9aa642c867b16c32489aaf5a296f6 not found: ID does not exist" Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.289993 4760 scope.go:117] "RemoveContainer" containerID="a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20" Nov 24 17:38:12 crc kubenswrapper[4760]: E1124 17:38:12.290304 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20\": container with ID starting with a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20 not found: ID does not exist" containerID="a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20" Nov 24 17:38:12 crc kubenswrapper[4760]: I1124 17:38:12.290333 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20"} err="failed to get container status \"a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20\": rpc error: code = NotFound desc = could not find container \"a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20\": container with ID starting with a584c502abe82d8c56a36b681557314b3f242fb54f29b9d570c5ed579eacbd20 not found: ID does not exist" Nov 24 17:38:13 crc kubenswrapper[4760]: I1124 17:38:13.480206 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="843677c0-77bb-46ee-9a98-78da63f20a62" path="/var/lib/kubelet/pods/843677c0-77bb-46ee-9a98-78da63f20a62/volumes" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.224953 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mt6h2"] Nov 24 17:38:14 crc kubenswrapper[4760]: E1124 17:38:14.225456 4760 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="843677c0-77bb-46ee-9a98-78da63f20a62" containerName="registry-server" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.225476 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="843677c0-77bb-46ee-9a98-78da63f20a62" containerName="registry-server" Nov 24 17:38:14 crc kubenswrapper[4760]: E1124 17:38:14.225490 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="843677c0-77bb-46ee-9a98-78da63f20a62" containerName="extract-content" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.225497 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="843677c0-77bb-46ee-9a98-78da63f20a62" containerName="extract-content" Nov 24 17:38:14 crc kubenswrapper[4760]: E1124 17:38:14.225519 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="843677c0-77bb-46ee-9a98-78da63f20a62" containerName="extract-utilities" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.225527 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="843677c0-77bb-46ee-9a98-78da63f20a62" containerName="extract-utilities" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.225730 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="843677c0-77bb-46ee-9a98-78da63f20a62" containerName="registry-server" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.227051 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.241277 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mt6h2"] Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.290351 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt7q5\" (UniqueName: \"kubernetes.io/projected/6757b988-f6d8-45f0-8b1f-e94479b05bdb-kube-api-access-qt7q5\") pod \"redhat-operators-mt6h2\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.290482 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-utilities\") pod \"redhat-operators-mt6h2\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.290682 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-catalog-content\") pod \"redhat-operators-mt6h2\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.392575 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-utilities\") pod \"redhat-operators-mt6h2\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.392892 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-catalog-content\") pod \"redhat-operators-mt6h2\" (UID: 
\"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.393090 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt7q5\" (UniqueName: \"kubernetes.io/projected/6757b988-f6d8-45f0-8b1f-e94479b05bdb-kube-api-access-qt7q5\") pod \"redhat-operators-mt6h2\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.393257 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-utilities\") pod \"redhat-operators-mt6h2\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.393796 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-catalog-content\") pod \"redhat-operators-mt6h2\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.417671 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt7q5\" (UniqueName: \"kubernetes.io/projected/6757b988-f6d8-45f0-8b1f-e94479b05bdb-kube-api-access-qt7q5\") pod \"redhat-operators-mt6h2\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.546885 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:14 crc kubenswrapper[4760]: I1124 17:38:14.996397 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mt6h2"] Nov 24 17:38:15 crc kubenswrapper[4760]: I1124 17:38:15.233703 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mt6h2" event={"ID":"6757b988-f6d8-45f0-8b1f-e94479b05bdb","Type":"ContainerStarted","Data":"297eb2b368d5306bdb4afeb3c76d702edc07c37870caff8fbc3b6baf1648cd82"} Nov 24 17:38:16 crc kubenswrapper[4760]: I1124 17:38:16.243610 4760 generic.go:334] "Generic (PLEG): container finished" podID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerID="ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f" exitCode=0 Nov 24 17:38:16 crc kubenswrapper[4760]: I1124 17:38:16.243678 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mt6h2" event={"ID":"6757b988-f6d8-45f0-8b1f-e94479b05bdb","Type":"ContainerDied","Data":"ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f"} Nov 24 17:38:17 crc kubenswrapper[4760]: I1124 17:38:17.257396 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mt6h2" event={"ID":"6757b988-f6d8-45f0-8b1f-e94479b05bdb","Type":"ContainerStarted","Data":"6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f"} Nov 24 17:38:19 crc kubenswrapper[4760]: I1124 17:38:19.280713 4760 generic.go:334] "Generic (PLEG): container finished" podID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerID="6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f" exitCode=0 Nov 24 17:38:19 crc kubenswrapper[4760]: I1124 17:38:19.280829 4760 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mt6h2" event={"ID":"6757b988-f6d8-45f0-8b1f-e94479b05bdb","Type":"ContainerDied","Data":"6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f"} Nov 24 17:38:21 crc kubenswrapper[4760]: I1124 17:38:21.304321 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mt6h2" event={"ID":"6757b988-f6d8-45f0-8b1f-e94479b05bdb","Type":"ContainerStarted","Data":"1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f"} Nov 24 17:38:24 crc kubenswrapper[4760]: I1124 17:38:24.547081 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:24 crc kubenswrapper[4760]: I1124 17:38:24.547620 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:25 crc kubenswrapper[4760]: I1124 17:38:25.592047 4760 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mt6h2" podUID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerName="registry-server" probeResult="failure" output=< Nov 24 17:38:25 crc kubenswrapper[4760]: timeout: failed to connect service ":50051" within 1s Nov 24 17:38:25 crc kubenswrapper[4760]: > Nov 24 17:38:34 crc kubenswrapper[4760]: I1124 17:38:34.591598 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:34 crc kubenswrapper[4760]: I1124 17:38:34.617074 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mt6h2" podStartSLOduration=16.515419122 podStartE2EDuration="20.617052204s" podCreationTimestamp="2025-11-24 17:38:14 +0000 UTC" firstStartedPulling="2025-11-24 17:38:16.246340672 +0000 UTC m=+2091.569236742" lastFinishedPulling="2025-11-24 17:38:20.347988264 +0000 UTC m=+2095.670869824" observedRunningTime="2025-11-24 17:38:21.334133979 +0000 UTC m=+2096.657015539" watchObservedRunningTime="2025-11-24 17:38:34.617052204 +0000 UTC m=+2109.939933754" Nov 24 17:38:34 crc kubenswrapper[4760]: I1124 17:38:34.648877 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:34 crc kubenswrapper[4760]: I1124 17:38:34.827511 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mt6h2"] Nov 24 17:38:36 crc kubenswrapper[4760]: I1124 17:38:36.424594 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mt6h2" podUID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerName="registry-server" containerID="cri-o://1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f" gracePeriod=2 Nov 24 17:38:36 crc kubenswrapper[4760]: I1124 17:38:36.876533 4760 util.go:48] "No ready sandbox for pod can be found. 
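Need to start a new one" pod="openshift-marketplace/redhat-operators-mt6h2"

The startup-probe failure above (timeout: failed to connect service ":50051" within 1s) is the registry-server's gRPC endpoint not yet accepting connections while the catalog is still loading. A stdlib-only Go approximation of that check, with the address and 1-second budget taken from the probe output; a real health probe would additionally issue a grpc.health.v1 Health/Check RPC after connecting:

    package main

    import (
        "fmt"
        "net"
        "os"
        "time"
    )

    // Rough stand-in for the registry-server startup probe seen above: is
    // the gRPC port accepting TCP connections within the 1s budget? The
    // address and timeout mirror the probe output in the log.
    func main() {
        conn, err := net.DialTimeout("tcp", "127.0.0.1:50051", time.Second)
        if err != nil {
            fmt.Fprintf(os.Stderr, "timeout: failed to connect service %q within 1s\n", ":50051")
            os.Exit(1)
        }
        conn.Close()
        fmt.Println("service reachable")
    }

The failure is transient by design: the kubelet keeps retrying the startup probe, and nine seconds after the failure above the same pod reports status="started" and then readiness status="ready".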
Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.029597 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qt7q5\" (UniqueName: \"kubernetes.io/projected/6757b988-f6d8-45f0-8b1f-e94479b05bdb-kube-api-access-qt7q5\") pod \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.029698 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-catalog-content\") pod \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.029868 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-utilities\") pod \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\" (UID: \"6757b988-f6d8-45f0-8b1f-e94479b05bdb\") " Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.030808 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-utilities" (OuterVolumeSpecName: "utilities") pod "6757b988-f6d8-45f0-8b1f-e94479b05bdb" (UID: "6757b988-f6d8-45f0-8b1f-e94479b05bdb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.038295 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6757b988-f6d8-45f0-8b1f-e94479b05bdb-kube-api-access-qt7q5" (OuterVolumeSpecName: "kube-api-access-qt7q5") pod "6757b988-f6d8-45f0-8b1f-e94479b05bdb" (UID: "6757b988-f6d8-45f0-8b1f-e94479b05bdb"). InnerVolumeSpecName "kube-api-access-qt7q5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.102343 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6757b988-f6d8-45f0-8b1f-e94479b05bdb" (UID: "6757b988-f6d8-45f0-8b1f-e94479b05bdb"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.132493 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.132524 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qt7q5\" (UniqueName: \"kubernetes.io/projected/6757b988-f6d8-45f0-8b1f-e94479b05bdb-kube-api-access-qt7q5\") on node \"crc\" DevicePath \"\"" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.132535 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6757b988-f6d8-45f0-8b1f-e94479b05bdb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.440219 4760 generic.go:334] "Generic (PLEG): container finished" podID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerID="1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f" exitCode=0 Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.440274 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mt6h2" event={"ID":"6757b988-f6d8-45f0-8b1f-e94479b05bdb","Type":"ContainerDied","Data":"1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f"} Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.440303 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mt6h2" event={"ID":"6757b988-f6d8-45f0-8b1f-e94479b05bdb","Type":"ContainerDied","Data":"297eb2b368d5306bdb4afeb3c76d702edc07c37870caff8fbc3b6baf1648cd82"} Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.440322 4760 scope.go:117] "RemoveContainer" containerID="1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.440494 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mt6h2" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.467629 4760 scope.go:117] "RemoveContainer" containerID="6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.490726 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mt6h2"] Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.499265 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mt6h2"] Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.502355 4760 scope.go:117] "RemoveContainer" containerID="ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.540897 4760 scope.go:117] "RemoveContainer" containerID="1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f" Nov 24 17:38:37 crc kubenswrapper[4760]: E1124 17:38:37.541582 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f\": container with ID starting with 1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f not found: ID does not exist" containerID="1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.541636 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f"} err="failed to get container status \"1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f\": rpc error: code = NotFound desc = could not find container \"1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f\": container with ID starting with 1c3dfb45eca9bff0fc423f44839f775ed9df14ab39017920e527435877d10d7f not found: ID does not exist" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.541666 4760 scope.go:117] "RemoveContainer" containerID="6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f" Nov 24 17:38:37 crc kubenswrapper[4760]: E1124 17:38:37.542378 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f\": container with ID starting with 6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f not found: ID does not exist" containerID="6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.542511 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f"} err="failed to get container status \"6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f\": rpc error: code = NotFound desc = could not find container \"6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f\": container with ID starting with 6955b8e234e608176957907980c237f7bc7eecfc367e4ad0f25c297cfd362a4f not found: ID does not exist" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.542596 4760 scope.go:117] "RemoveContainer" containerID="ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f" Nov 24 17:38:37 crc kubenswrapper[4760]: E1124 17:38:37.542996 4760 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f\": container with ID starting with ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f not found: ID does not exist" containerID="ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f" Nov 24 17:38:37 crc kubenswrapper[4760]: I1124 17:38:37.543043 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f"} err="failed to get container status \"ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f\": rpc error: code = NotFound desc = could not find container \"ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f\": container with ID starting with ee573eb9acd3170d940f1f152c7be9c942e727b14a33454238e643fdb8682e6f not found: ID does not exist" Nov 24 17:38:39 crc kubenswrapper[4760]: I1124 17:38:39.476911 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" path="/var/lib/kubelet/pods/6757b988-f6d8-45f0-8b1f-e94479b05bdb/volumes" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.554688 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qxbf5"] Nov 24 17:39:39 crc kubenswrapper[4760]: E1124 17:39:39.555700 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerName="registry-server" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.555714 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerName="registry-server" Nov 24 17:39:39 crc kubenswrapper[4760]: E1124 17:39:39.555727 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerName="extract-utilities" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.555734 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerName="extract-utilities" Nov 24 17:39:39 crc kubenswrapper[4760]: E1124 17:39:39.555759 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerName="extract-content" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.555765 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerName="extract-content" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.555937 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="6757b988-f6d8-45f0-8b1f-e94479b05bdb" containerName="registry-server" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.557514 4760 util.go:30] "No sandbox for pod can be found. 
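Need to start a new one" pod="openshift-marketplace/community-operators-qxbf5"

The NotFound errors above are a benign race that repeats for every catalog pod in this capture: by the time the SyncLoop REMOVE path asks the runtime for the status of a container it wants deleted, CRI-O has already removed it, so ContainerStatus returns gRPC NotFound and the kubelet just logs it and moves on. A sketch of that cleanup idiom, assuming a gRPC-backed runtime client; removeContainer here is a hypothetical stand-in for the CRI call, not a real API:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeIfPresent treats "already gone" as success, which is how the
    // kubelet tolerates the NotFound responses in the records above.
    func removeIfPresent(id string, removeContainer func(string) error) error {
        if err := removeContainer(id); err != nil {
            if status.Code(err) == codes.NotFound {
                return nil // the runtime already deleted it: nothing to do
            }
            return fmt.Errorf("remove %s: %w", id, err)
        }
        return nil
    }

    func main() {
        gone := func(string) error {
            return status.Error(codes.NotFound, "could not find container")
        }
        fmt.Println(removeIfPresent("ad8d06894f30", gone)) // <nil>
    }

Deletion is idempotent by intent, so the E-level log lines here indicate noise from the retry, not a cleanup failure; the volumes dir for the pod is still cleaned up moments later.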
Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.568580 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qxbf5"] Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.725651 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9xc5\" (UniqueName: \"kubernetes.io/projected/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-kube-api-access-p9xc5\") pod \"community-operators-qxbf5\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.725704 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-utilities\") pod \"community-operators-qxbf5\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.725790 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-catalog-content\") pod \"community-operators-qxbf5\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.827845 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9xc5\" (UniqueName: \"kubernetes.io/projected/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-kube-api-access-p9xc5\") pod \"community-operators-qxbf5\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.827905 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-utilities\") pod \"community-operators-qxbf5\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.827940 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-catalog-content\") pod \"community-operators-qxbf5\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.828551 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-utilities\") pod \"community-operators-qxbf5\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.828626 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-catalog-content\") pod \"community-operators-qxbf5\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.857818 4760 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"kube-api-access-p9xc5\" (UniqueName: \"kubernetes.io/projected/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-kube-api-access-p9xc5\") pod \"community-operators-qxbf5\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:39 crc kubenswrapper[4760]: I1124 17:39:39.889449 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:40 crc kubenswrapper[4760]: I1124 17:39:40.448430 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qxbf5"] Nov 24 17:39:41 crc kubenswrapper[4760]: I1124 17:39:41.020158 4760 generic.go:334] "Generic (PLEG): container finished" podID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerID="eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41" exitCode=0 Nov 24 17:39:41 crc kubenswrapper[4760]: I1124 17:39:41.020253 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxbf5" event={"ID":"355863c6-cfdf-4cc0-96bb-ce70f5719bc2","Type":"ContainerDied","Data":"eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41"} Nov 24 17:39:41 crc kubenswrapper[4760]: I1124 17:39:41.020448 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxbf5" event={"ID":"355863c6-cfdf-4cc0-96bb-ce70f5719bc2","Type":"ContainerStarted","Data":"086bb88581042b2ae79a8566a1bd6fa18ac3a1db7824b46468c79804010ad445"} Nov 24 17:39:43 crc kubenswrapper[4760]: I1124 17:39:43.040821 4760 generic.go:334] "Generic (PLEG): container finished" podID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerID="5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f" exitCode=0 Nov 24 17:39:43 crc kubenswrapper[4760]: I1124 17:39:43.040893 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxbf5" event={"ID":"355863c6-cfdf-4cc0-96bb-ce70f5719bc2","Type":"ContainerDied","Data":"5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f"} Nov 24 17:39:45 crc kubenswrapper[4760]: I1124 17:39:45.061807 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxbf5" event={"ID":"355863c6-cfdf-4cc0-96bb-ce70f5719bc2","Type":"ContainerStarted","Data":"943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105"} Nov 24 17:39:49 crc kubenswrapper[4760]: I1124 17:39:49.889728 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:49 crc kubenswrapper[4760]: I1124 17:39:49.890147 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:49 crc kubenswrapper[4760]: I1124 17:39:49.945107 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:49 crc kubenswrapper[4760]: I1124 17:39:49.966371 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qxbf5" podStartSLOduration=8.199116319 podStartE2EDuration="10.966354221s" podCreationTimestamp="2025-11-24 17:39:39 +0000 UTC" firstStartedPulling="2025-11-24 17:39:41.032397224 +0000 UTC m=+2176.355278814" lastFinishedPulling="2025-11-24 17:39:43.799635166 +0000 UTC m=+2179.122516716" observedRunningTime="2025-11-24 
17:39:45.087478683 +0000 UTC m=+2180.410360233" watchObservedRunningTime="2025-11-24 17:39:49.966354221 +0000 UTC m=+2185.289235771" Nov 24 17:39:50 crc kubenswrapper[4760]: I1124 17:39:50.188451 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:50 crc kubenswrapper[4760]: I1124 17:39:50.234589 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qxbf5"] Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.151109 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qxbf5" podUID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerName="registry-server" containerID="cri-o://943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105" gracePeriod=2 Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.836115 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.890864 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9xc5\" (UniqueName: \"kubernetes.io/projected/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-kube-api-access-p9xc5\") pod \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.891438 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-catalog-content\") pod \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.891469 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-utilities\") pod \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\" (UID: \"355863c6-cfdf-4cc0-96bb-ce70f5719bc2\") " Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.892346 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-utilities" (OuterVolumeSpecName: "utilities") pod "355863c6-cfdf-4cc0-96bb-ce70f5719bc2" (UID: "355863c6-cfdf-4cc0-96bb-ce70f5719bc2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.899907 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-kube-api-access-p9xc5" (OuterVolumeSpecName: "kube-api-access-p9xc5") pod "355863c6-cfdf-4cc0-96bb-ce70f5719bc2" (UID: "355863c6-cfdf-4cc0-96bb-ce70f5719bc2"). InnerVolumeSpecName "kube-api-access-p9xc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.942387 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "355863c6-cfdf-4cc0-96bb-ce70f5719bc2" (UID: "355863c6-cfdf-4cc0-96bb-ce70f5719bc2"). InnerVolumeSpecName "catalog-content". 
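PluginName "kubernetes.io/empty-dir", VolumeGidValue ""

The startup-latency record above for community-operators-qxbf5 is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (17:39:49.966354221 - 17:39:39 = 10.966354221s), and podStartSLOduration is that end-to-end time minus the image-pull window (lastFinishedPulling - firstStartedPulling), since the tracker does not charge pull time against the startup SLI. Checking the arithmetic with the monotonic m=+ offsets printed in the record:

    package main

    import "fmt"

    // Cross-check of the pod_startup_latency_tracker record for
    // community-operators-qxbf5 using the m=+ offsets from the log.
    func main() {
        const (
            firstStartedPulling = 2176.355278814 // seconds, m=+ offset
            lastFinishedPulling = 2179.122516716
            e2e                 = 10.966354221 // watchObservedRunningTime - creation
        )
        pull := lastFinishedPulling - firstStartedPulling
        fmt.Printf("image pull window: %.9fs\n", pull)     // 2.767237902s
        fmt.Printf("SLO duration:      %.9fs\n", e2e-pull) // 8.199116319s, as logged
    }

The same relation holds for the redhat-marketplace-mbd44 record earlier in this capture (5.156934933s end-to-end minus a 2.491076751s pull window gives the logged 2.665858182s).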
Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.993782 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.993810 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:39:52 crc kubenswrapper[4760]: I1124 17:39:52.993819 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9xc5\" (UniqueName: \"kubernetes.io/projected/355863c6-cfdf-4cc0-96bb-ce70f5719bc2-kube-api-access-p9xc5\") on node \"crc\" DevicePath \"\"" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.161434 4760 generic.go:334] "Generic (PLEG): container finished" podID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerID="943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105" exitCode=0 Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.161486 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxbf5" event={"ID":"355863c6-cfdf-4cc0-96bb-ce70f5719bc2","Type":"ContainerDied","Data":"943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105"} Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.161562 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxbf5" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.162370 4760 scope.go:117] "RemoveContainer" containerID="943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.162352 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxbf5" event={"ID":"355863c6-cfdf-4cc0-96bb-ce70f5719bc2","Type":"ContainerDied","Data":"086bb88581042b2ae79a8566a1bd6fa18ac3a1db7824b46468c79804010ad445"} Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.185880 4760 scope.go:117] "RemoveContainer" containerID="5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.208547 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qxbf5"] Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.220541 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qxbf5"] Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.229866 4760 scope.go:117] "RemoveContainer" containerID="eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.252629 4760 scope.go:117] "RemoveContainer" containerID="943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105" Nov 24 17:39:53 crc kubenswrapper[4760]: E1124 17:39:53.255565 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105\": container with ID starting with 943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105 not found: ID does not exist" containerID="943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.255613
4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105"} err="failed to get container status \"943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105\": rpc error: code = NotFound desc = could not find container \"943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105\": container with ID starting with 943de2b57dac0cc879902d86cee8a7c858c5a7726d64010a14c59fc11e7b2105 not found: ID does not exist" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.255640 4760 scope.go:117] "RemoveContainer" containerID="5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f" Nov 24 17:39:53 crc kubenswrapper[4760]: E1124 17:39:53.255974 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f\": container with ID starting with 5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f not found: ID does not exist" containerID="5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.256021 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f"} err="failed to get container status \"5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f\": rpc error: code = NotFound desc = could not find container \"5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f\": container with ID starting with 5fdd1bfa2048b287cb60a6ef3c2236dee03cb249b2389d590c0d71583ceb118f not found: ID does not exist" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.256041 4760 scope.go:117] "RemoveContainer" containerID="eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41" Nov 24 17:39:53 crc kubenswrapper[4760]: E1124 17:39:53.256296 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41\": container with ID starting with eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41 not found: ID does not exist" containerID="eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.256325 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41"} err="failed to get container status \"eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41\": rpc error: code = NotFound desc = could not find container \"eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41\": container with ID starting with eb37b9954c7c7bacaab15cf2b4fa84a835596943caf2dfc3f569c3f82c86cb41 not found: ID does not exist" Nov 24 17:39:53 crc kubenswrapper[4760]: I1124 17:39:53.475649 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" path="/var/lib/kubelet/pods/355863c6-cfdf-4cc0-96bb-ce70f5719bc2/volumes" Nov 24 17:40:35 crc kubenswrapper[4760]: I1124 17:40:35.643191 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
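127.0.0.1:8798: connect: connection refused" start-of-body=

Most of this capture is the same catalog-pod churn repeating every minute or two, plus recurring machine-config-daemon probe failures like the one above. When triaging a log like this, a small scanner that tallies "Probe failed" records per pod and pulls out the startup durations saves a lot of scrolling. A stdlib-only sketch; the kubelet.log default path is an assumption, pass the real file as the first argument:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    // Tiny triage helper for captures like this one. It assumes one log
    // record per line, as in the original journal output.
    func main() {
        path := "kubelet.log"
        if len(os.Args) > 1 {
            path = os.Args[1]
        }
        f, err := os.Open(path)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        defer f.Close()

        probeRe := regexp.MustCompile(`"Probe failed".*pod="([^"]+)"`)
        sloRe := regexp.MustCompile(`pod="([^"]+)".*podStartE2EDuration="([^"]+)"`)
        failures := map[string]int{}

        sc := bufio.NewScanner(f)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // records can be long
        for sc.Scan() {
            line := sc.Text()
            if m := probeRe.FindStringSubmatch(line); m != nil {
                failures[m[1]]++
            }
            if m := sloRe.FindStringSubmatch(line); m != nil {
                fmt.Printf("%s started in %s\n", m[1], m[2])
            }
        }
        for pod, n := range failures {
            fmt.Printf("%s: %d probe failure(s)\n", pod, n)
        }
    }

Run against this node's log it would surface, among others, the repeated machine-config-daemon-vgbxz liveness failures and the per-catalog-pod startup durations recorded by pod_startup_latency_tracker.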
Nov 24 17:40:35 crc kubenswrapper[4760]: I1124 17:40:35.644301 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.459739 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-llfnb"] Nov 24 17:40:55 crc kubenswrapper[4760]: E1124 17:40:55.460786 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerName="extract-content" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.460806 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerName="extract-content" Nov 24 17:40:55 crc kubenswrapper[4760]: E1124 17:40:55.460843 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerName="extract-utilities" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.460852 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerName="extract-utilities" Nov 24 17:40:55 crc kubenswrapper[4760]: E1124 17:40:55.460871 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerName="registry-server" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.460879 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerName="registry-server" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.461125 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="355863c6-cfdf-4cc0-96bb-ce70f5719bc2" containerName="registry-server" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.462744 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.482564 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-llfnb"] Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.517776 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-catalog-content\") pod \"certified-operators-llfnb\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.517884 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-utilities\") pod \"certified-operators-llfnb\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.517906 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbl96\" (UniqueName: \"kubernetes.io/projected/43c1f543-397c-4fb2-8bde-6b40d5ff1302-kube-api-access-qbl96\") pod \"certified-operators-llfnb\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.620630 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-catalog-content\") pod \"certified-operators-llfnb\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.620792 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-utilities\") pod \"certified-operators-llfnb\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.620821 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbl96\" (UniqueName: \"kubernetes.io/projected/43c1f543-397c-4fb2-8bde-6b40d5ff1302-kube-api-access-qbl96\") pod \"certified-operators-llfnb\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.621256 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-catalog-content\") pod \"certified-operators-llfnb\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.621438 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-utilities\") pod \"certified-operators-llfnb\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.640646 4760 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qbl96\" (UniqueName: \"kubernetes.io/projected/43c1f543-397c-4fb2-8bde-6b40d5ff1302-kube-api-access-qbl96\") pod \"certified-operators-llfnb\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:55 crc kubenswrapper[4760]: I1124 17:40:55.831091 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:40:56 crc kubenswrapper[4760]: I1124 17:40:56.011775 4760 generic.go:334] "Generic (PLEG): container finished" podID="d89f2f80-b7b0-49b2-beab-c4fd2d17352f" containerID="5a309a4cdf05e4bb9075c56f5e8b42ef9caa065374f7cb3d90aebe73670e9338" exitCode=0 Nov 24 17:40:56 crc kubenswrapper[4760]: I1124 17:40:56.011858 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" event={"ID":"d89f2f80-b7b0-49b2-beab-c4fd2d17352f","Type":"ContainerDied","Data":"5a309a4cdf05e4bb9075c56f5e8b42ef9caa065374f7cb3d90aebe73670e9338"} Nov 24 17:40:56 crc kubenswrapper[4760]: I1124 17:40:56.355862 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-llfnb"] Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.025196 4760 generic.go:334] "Generic (PLEG): container finished" podID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerID="289996309c86dbe2629eba8ad59c5932a75644039237972e234fb2e1a4b0a278" exitCode=0 Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.025294 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-llfnb" event={"ID":"43c1f543-397c-4fb2-8bde-6b40d5ff1302","Type":"ContainerDied","Data":"289996309c86dbe2629eba8ad59c5932a75644039237972e234fb2e1a4b0a278"} Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.025724 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-llfnb" event={"ID":"43c1f543-397c-4fb2-8bde-6b40d5ff1302","Type":"ContainerStarted","Data":"e4020eac7f9681f92d8f9306999b77e99201e9b953da7fecb1db28249b000f39"} Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.027633 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.444359 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.479802 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-secret-0\") pod \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.480535 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-ssh-key\") pod \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.480592 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-combined-ca-bundle\") pod \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.480677 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-inventory\") pod \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.480708 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nd647\" (UniqueName: \"kubernetes.io/projected/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-kube-api-access-nd647\") pod \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\" (UID: \"d89f2f80-b7b0-49b2-beab-c4fd2d17352f\") " Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.486395 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-kube-api-access-nd647" (OuterVolumeSpecName: "kube-api-access-nd647") pod "d89f2f80-b7b0-49b2-beab-c4fd2d17352f" (UID: "d89f2f80-b7b0-49b2-beab-c4fd2d17352f"). InnerVolumeSpecName "kube-api-access-nd647". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.490232 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "d89f2f80-b7b0-49b2-beab-c4fd2d17352f" (UID: "d89f2f80-b7b0-49b2-beab-c4fd2d17352f"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.508982 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-inventory" (OuterVolumeSpecName: "inventory") pod "d89f2f80-b7b0-49b2-beab-c4fd2d17352f" (UID: "d89f2f80-b7b0-49b2-beab-c4fd2d17352f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.511612 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "d89f2f80-b7b0-49b2-beab-c4fd2d17352f" (UID: "d89f2f80-b7b0-49b2-beab-c4fd2d17352f"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.516432 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d89f2f80-b7b0-49b2-beab-c4fd2d17352f" (UID: "d89f2f80-b7b0-49b2-beab-c4fd2d17352f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.582874 4760 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.582911 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.582922 4760 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.582937 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:40:57 crc kubenswrapper[4760]: I1124 17:40:57.582948 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nd647\" (UniqueName: \"kubernetes.io/projected/d89f2f80-b7b0-49b2-beab-c4fd2d17352f-kube-api-access-nd647\") on node \"crc\" DevicePath \"\"" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.035678 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" event={"ID":"d89f2f80-b7b0-49b2-beab-c4fd2d17352f","Type":"ContainerDied","Data":"05bc87a4d0ad6c526cca12310ee600bc658ebb8f9f494bffb5988e3246bb4d44"} Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.035725 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05bc87a4d0ad6c526cca12310ee600bc658ebb8f9f494bffb5988e3246bb4d44" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.035778 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.043974 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-llfnb" event={"ID":"43c1f543-397c-4fb2-8bde-6b40d5ff1302","Type":"ContainerStarted","Data":"f56ac982fcfa02e5cf64275aee01b735c9caf4d74b3feea450d078f391d58e14"} Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.128066 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt"] Nov 24 17:40:58 crc kubenswrapper[4760]: E1124 17:40:58.128771 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d89f2f80-b7b0-49b2-beab-c4fd2d17352f" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.128791 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d89f2f80-b7b0-49b2-beab-c4fd2d17352f" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.129045 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="d89f2f80-b7b0-49b2-beab-c4fd2d17352f" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.129812 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.138679 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt"] Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.144206 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.144417 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.144592 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.146076 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.148660 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.148670 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.148922 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.194771 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvzvz\" (UniqueName: \"kubernetes.io/projected/cd29f6ba-13bc-4598-a031-18c0763458dc-kube-api-access-qvzvz\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.194883 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: 
\"kubernetes.io/configmap/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.194925 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.194955 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.195121 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.195284 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.195343 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.195520 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.195560 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.297468 4760 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.297530 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.297582 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvzvz\" (UniqueName: \"kubernetes.io/projected/cd29f6ba-13bc-4598-a031-18c0763458dc-kube-api-access-qvzvz\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.297657 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.297697 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.297728 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.297759 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.297811 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.297844 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.298754 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.303744 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.304049 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.304243 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.304348 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.304920 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.305444 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.305454 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: 
\"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.319144 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvzvz\" (UniqueName: \"kubernetes.io/projected/cd29f6ba-13bc-4598-a031-18c0763458dc-kube-api-access-qvzvz\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5fgt\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.452838 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:40:58 crc kubenswrapper[4760]: I1124 17:40:58.953579 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt"] Nov 24 17:40:58 crc kubenswrapper[4760]: W1124 17:40:58.957884 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd29f6ba_13bc_4598_a031_18c0763458dc.slice/crio-218e74bece26c2d6df6cd71f5d4cc6d5df1f4542ddc41e36c25d37159c4ea25b WatchSource:0}: Error finding container 218e74bece26c2d6df6cd71f5d4cc6d5df1f4542ddc41e36c25d37159c4ea25b: Status 404 returned error can't find the container with id 218e74bece26c2d6df6cd71f5d4cc6d5df1f4542ddc41e36c25d37159c4ea25b Nov 24 17:40:59 crc kubenswrapper[4760]: I1124 17:40:59.071160 4760 generic.go:334] "Generic (PLEG): container finished" podID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerID="f56ac982fcfa02e5cf64275aee01b735c9caf4d74b3feea450d078f391d58e14" exitCode=0 Nov 24 17:40:59 crc kubenswrapper[4760]: I1124 17:40:59.071245 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-llfnb" event={"ID":"43c1f543-397c-4fb2-8bde-6b40d5ff1302","Type":"ContainerDied","Data":"f56ac982fcfa02e5cf64275aee01b735c9caf4d74b3feea450d078f391d58e14"} Nov 24 17:40:59 crc kubenswrapper[4760]: I1124 17:40:59.072776 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" event={"ID":"cd29f6ba-13bc-4598-a031-18c0763458dc","Type":"ContainerStarted","Data":"218e74bece26c2d6df6cd71f5d4cc6d5df1f4542ddc41e36c25d37159c4ea25b"} Nov 24 17:41:00 crc kubenswrapper[4760]: I1124 17:41:00.083266 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-llfnb" event={"ID":"43c1f543-397c-4fb2-8bde-6b40d5ff1302","Type":"ContainerStarted","Data":"d4f195f164dd2365c3f5a3fd74825fc9d3b8e4752401f54ad4b5b699d309106e"} Nov 24 17:41:00 crc kubenswrapper[4760]: I1124 17:41:00.086134 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" event={"ID":"cd29f6ba-13bc-4598-a031-18c0763458dc","Type":"ContainerStarted","Data":"6a28b6c5d6e492dc4fcb1f42cee6d51431f1281cce4064a113e5f12596560ffd"} Nov 24 17:41:00 crc kubenswrapper[4760]: I1124 17:41:00.119293 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-llfnb" podStartSLOduration=2.415043064 podStartE2EDuration="5.119269035s" podCreationTimestamp="2025-11-24 17:40:55 +0000 UTC" firstStartedPulling="2025-11-24 17:40:57.027434985 +0000 UTC m=+2252.350316535" lastFinishedPulling="2025-11-24 17:40:59.731660956 +0000 UTC m=+2255.054542506" observedRunningTime="2025-11-24 17:41:00.100068466 
+0000 UTC m=+2255.422950016" watchObservedRunningTime="2025-11-24 17:41:00.119269035 +0000 UTC m=+2255.442150605" Nov 24 17:41:00 crc kubenswrapper[4760]: I1124 17:41:00.125730 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" podStartSLOduration=1.669486106 podStartE2EDuration="2.12570967s" podCreationTimestamp="2025-11-24 17:40:58 +0000 UTC" firstStartedPulling="2025-11-24 17:40:58.960279975 +0000 UTC m=+2254.283161525" lastFinishedPulling="2025-11-24 17:40:59.416503549 +0000 UTC m=+2254.739385089" observedRunningTime="2025-11-24 17:41:00.116694352 +0000 UTC m=+2255.439575912" watchObservedRunningTime="2025-11-24 17:41:00.12570967 +0000 UTC m=+2255.448591220" Nov 24 17:41:05 crc kubenswrapper[4760]: I1124 17:41:05.643034 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:41:05 crc kubenswrapper[4760]: I1124 17:41:05.643620 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:41:05 crc kubenswrapper[4760]: I1124 17:41:05.831216 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:41:05 crc kubenswrapper[4760]: I1124 17:41:05.831595 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:41:05 crc kubenswrapper[4760]: I1124 17:41:05.877139 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:41:06 crc kubenswrapper[4760]: I1124 17:41:06.209918 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:41:06 crc kubenswrapper[4760]: I1124 17:41:06.259644 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-llfnb"] Nov 24 17:41:08 crc kubenswrapper[4760]: I1124 17:41:08.146596 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-llfnb" podUID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerName="registry-server" containerID="cri-o://d4f195f164dd2365c3f5a3fd74825fc9d3b8e4752401f54ad4b5b699d309106e" gracePeriod=2 Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.158928 4760 generic.go:334] "Generic (PLEG): container finished" podID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerID="d4f195f164dd2365c3f5a3fd74825fc9d3b8e4752401f54ad4b5b699d309106e" exitCode=0 Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.159025 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-llfnb" event={"ID":"43c1f543-397c-4fb2-8bde-6b40d5ff1302","Type":"ContainerDied","Data":"d4f195f164dd2365c3f5a3fd74825fc9d3b8e4752401f54ad4b5b699d309106e"} Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.778344 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.848768 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbl96\" (UniqueName: \"kubernetes.io/projected/43c1f543-397c-4fb2-8bde-6b40d5ff1302-kube-api-access-qbl96\") pod \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.849839 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-catalog-content\") pod \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.849919 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-utilities\") pod \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\" (UID: \"43c1f543-397c-4fb2-8bde-6b40d5ff1302\") " Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.850932 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-utilities" (OuterVolumeSpecName: "utilities") pod "43c1f543-397c-4fb2-8bde-6b40d5ff1302" (UID: "43c1f543-397c-4fb2-8bde-6b40d5ff1302"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.857367 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43c1f543-397c-4fb2-8bde-6b40d5ff1302-kube-api-access-qbl96" (OuterVolumeSpecName: "kube-api-access-qbl96") pod "43c1f543-397c-4fb2-8bde-6b40d5ff1302" (UID: "43c1f543-397c-4fb2-8bde-6b40d5ff1302"). InnerVolumeSpecName "kube-api-access-qbl96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.894965 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "43c1f543-397c-4fb2-8bde-6b40d5ff1302" (UID: "43c1f543-397c-4fb2-8bde-6b40d5ff1302"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.953482 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.953557 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43c1f543-397c-4fb2-8bde-6b40d5ff1302-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:41:09 crc kubenswrapper[4760]: I1124 17:41:09.953584 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbl96\" (UniqueName: \"kubernetes.io/projected/43c1f543-397c-4fb2-8bde-6b40d5ff1302-kube-api-access-qbl96\") on node \"crc\" DevicePath \"\"" Nov 24 17:41:10 crc kubenswrapper[4760]: I1124 17:41:10.172047 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-llfnb" event={"ID":"43c1f543-397c-4fb2-8bde-6b40d5ff1302","Type":"ContainerDied","Data":"e4020eac7f9681f92d8f9306999b77e99201e9b953da7fecb1db28249b000f39"} Nov 24 17:41:10 crc kubenswrapper[4760]: I1124 17:41:10.172101 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-llfnb" Nov 24 17:41:10 crc kubenswrapper[4760]: I1124 17:41:10.173133 4760 scope.go:117] "RemoveContainer" containerID="d4f195f164dd2365c3f5a3fd74825fc9d3b8e4752401f54ad4b5b699d309106e" Nov 24 17:41:10 crc kubenswrapper[4760]: I1124 17:41:10.212487 4760 scope.go:117] "RemoveContainer" containerID="f56ac982fcfa02e5cf64275aee01b735c9caf4d74b3feea450d078f391d58e14" Nov 24 17:41:10 crc kubenswrapper[4760]: I1124 17:41:10.218294 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-llfnb"] Nov 24 17:41:10 crc kubenswrapper[4760]: I1124 17:41:10.245783 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-llfnb"] Nov 24 17:41:10 crc kubenswrapper[4760]: I1124 17:41:10.248168 4760 scope.go:117] "RemoveContainer" containerID="289996309c86dbe2629eba8ad59c5932a75644039237972e234fb2e1a4b0a278" Nov 24 17:41:11 crc kubenswrapper[4760]: I1124 17:41:11.480247 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" path="/var/lib/kubelet/pods/43c1f543-397c-4fb2-8bde-6b40d5ff1302/volumes" Nov 24 17:41:35 crc kubenswrapper[4760]: I1124 17:41:35.644203 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:41:35 crc kubenswrapper[4760]: I1124 17:41:35.644823 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:41:35 crc kubenswrapper[4760]: I1124 17:41:35.644868 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:41:35 crc kubenswrapper[4760]: I1124 17:41:35.645623 4760 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:41:35 crc kubenswrapper[4760]: I1124 17:41:35.645679 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" gracePeriod=600 Nov 24 17:41:36 crc kubenswrapper[4760]: E1124 17:41:36.270974 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:41:36 crc kubenswrapper[4760]: I1124 17:41:36.412172 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" exitCode=0 Nov 24 17:41:36 crc kubenswrapper[4760]: I1124 17:41:36.412219 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2"} Nov 24 17:41:36 crc kubenswrapper[4760]: I1124 17:41:36.412264 4760 scope.go:117] "RemoveContainer" containerID="34dd14c1d8b399108c0785894d67dba23d127edf9003f77e4b6eaa1ff928de77" Nov 24 17:41:36 crc kubenswrapper[4760]: I1124 17:41:36.412822 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:41:36 crc kubenswrapper[4760]: E1124 17:41:36.413106 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:41:49 crc kubenswrapper[4760]: I1124 17:41:49.466633 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:41:49 crc kubenswrapper[4760]: E1124 17:41:49.467379 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:42:04 crc kubenswrapper[4760]: I1124 17:42:04.467163 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 
17:42:04 crc kubenswrapper[4760]: E1124 17:42:04.469368 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:42:18 crc kubenswrapper[4760]: I1124 17:42:18.466544 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:42:18 crc kubenswrapper[4760]: E1124 17:42:18.467380 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:42:31 crc kubenswrapper[4760]: I1124 17:42:31.466405 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:42:31 crc kubenswrapper[4760]: E1124 17:42:31.467280 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:42:45 crc kubenswrapper[4760]: I1124 17:42:45.478911 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:42:45 crc kubenswrapper[4760]: E1124 17:42:45.479999 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:42:57 crc kubenswrapper[4760]: I1124 17:42:57.470559 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:42:57 crc kubenswrapper[4760]: E1124 17:42:57.478202 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:43:08 crc kubenswrapper[4760]: I1124 17:43:08.466339 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:43:08 crc kubenswrapper[4760]: E1124 17:43:08.467363 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:43:23 crc kubenswrapper[4760]: I1124 17:43:23.466758 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:43:23 crc kubenswrapper[4760]: E1124 17:43:23.468125 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:43:35 crc kubenswrapper[4760]: I1124 17:43:35.945101 4760 generic.go:334] "Generic (PLEG): container finished" podID="cd29f6ba-13bc-4598-a031-18c0763458dc" containerID="6a28b6c5d6e492dc4fcb1f42cee6d51431f1281cce4064a113e5f12596560ffd" exitCode=0 Nov 24 17:43:35 crc kubenswrapper[4760]: I1124 17:43:35.945173 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" event={"ID":"cd29f6ba-13bc-4598-a031-18c0763458dc","Type":"ContainerDied","Data":"6a28b6c5d6e492dc4fcb1f42cee6d51431f1281cce4064a113e5f12596560ffd"} Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.348696 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.356095 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-0\") pod \"cd29f6ba-13bc-4598-a031-18c0763458dc\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.356168 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-combined-ca-bundle\") pod \"cd29f6ba-13bc-4598-a031-18c0763458dc\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.356216 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-1\") pod \"cd29f6ba-13bc-4598-a031-18c0763458dc\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.356318 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-extra-config-0\") pod \"cd29f6ba-13bc-4598-a031-18c0763458dc\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.356355 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-inventory\") pod \"cd29f6ba-13bc-4598-a031-18c0763458dc\" (UID: 
\"cd29f6ba-13bc-4598-a031-18c0763458dc\") " Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.356376 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-ssh-key\") pod \"cd29f6ba-13bc-4598-a031-18c0763458dc\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.356396 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-0\") pod \"cd29f6ba-13bc-4598-a031-18c0763458dc\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.356444 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvzvz\" (UniqueName: \"kubernetes.io/projected/cd29f6ba-13bc-4598-a031-18c0763458dc-kube-api-access-qvzvz\") pod \"cd29f6ba-13bc-4598-a031-18c0763458dc\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.356491 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-1\") pod \"cd29f6ba-13bc-4598-a031-18c0763458dc\" (UID: \"cd29f6ba-13bc-4598-a031-18c0763458dc\") " Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.363057 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd29f6ba-13bc-4598-a031-18c0763458dc-kube-api-access-qvzvz" (OuterVolumeSpecName: "kube-api-access-qvzvz") pod "cd29f6ba-13bc-4598-a031-18c0763458dc" (UID: "cd29f6ba-13bc-4598-a031-18c0763458dc"). InnerVolumeSpecName "kube-api-access-qvzvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.363195 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "cd29f6ba-13bc-4598-a031-18c0763458dc" (UID: "cd29f6ba-13bc-4598-a031-18c0763458dc"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.387468 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "cd29f6ba-13bc-4598-a031-18c0763458dc" (UID: "cd29f6ba-13bc-4598-a031-18c0763458dc"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.393613 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "cd29f6ba-13bc-4598-a031-18c0763458dc" (UID: "cd29f6ba-13bc-4598-a031-18c0763458dc"). InnerVolumeSpecName "nova-cell1-compute-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.400911 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-inventory" (OuterVolumeSpecName: "inventory") pod "cd29f6ba-13bc-4598-a031-18c0763458dc" (UID: "cd29f6ba-13bc-4598-a031-18c0763458dc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.404647 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cd29f6ba-13bc-4598-a031-18c0763458dc" (UID: "cd29f6ba-13bc-4598-a031-18c0763458dc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.405469 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "cd29f6ba-13bc-4598-a031-18c0763458dc" (UID: "cd29f6ba-13bc-4598-a031-18c0763458dc"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.406359 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "cd29f6ba-13bc-4598-a031-18c0763458dc" (UID: "cd29f6ba-13bc-4598-a031-18c0763458dc"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.409996 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "cd29f6ba-13bc-4598-a031-18c0763458dc" (UID: "cd29f6ba-13bc-4598-a031-18c0763458dc"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.461729 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvzvz\" (UniqueName: \"kubernetes.io/projected/cd29f6ba-13bc-4598-a031-18c0763458dc-kube-api-access-qvzvz\") on node \"crc\" DevicePath \"\"" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.461773 4760 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.461812 4760 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.461826 4760 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.461838 4760 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.461851 4760 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.461864 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.461875 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.461913 4760 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/cd29f6ba-13bc-4598-a031-18c0763458dc-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.963672 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" event={"ID":"cd29f6ba-13bc-4598-a031-18c0763458dc","Type":"ContainerDied","Data":"218e74bece26c2d6df6cd71f5d4cc6d5df1f4542ddc41e36c25d37159c4ea25b"} Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.964094 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="218e74bece26c2d6df6cd71f5d4cc6d5df1f4542ddc41e36c25d37159c4ea25b" Nov 24 17:43:37 crc kubenswrapper[4760]: I1124 17:43:37.963768 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5fgt" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.065698 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q"] Nov 24 17:43:38 crc kubenswrapper[4760]: E1124 17:43:38.066162 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd29f6ba-13bc-4598-a031-18c0763458dc" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.066177 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd29f6ba-13bc-4598-a031-18c0763458dc" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 24 17:43:38 crc kubenswrapper[4760]: E1124 17:43:38.066193 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerName="extract-utilities" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.066201 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerName="extract-utilities" Nov 24 17:43:38 crc kubenswrapper[4760]: E1124 17:43:38.066218 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerName="registry-server" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.066224 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerName="registry-server" Nov 24 17:43:38 crc kubenswrapper[4760]: E1124 17:43:38.066250 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerName="extract-content" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.066257 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerName="extract-content" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.066450 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="43c1f543-397c-4fb2-8bde-6b40d5ff1302" containerName="registry-server" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.066467 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd29f6ba-13bc-4598-a031-18c0763458dc" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.067245 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.069265 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.070727 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g48st" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.070766 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.071860 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.074535 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.074597 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.074631 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.075045 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.075079 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.075147 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") 
" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.075983 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q"] Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.077867 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.177505 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.177593 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.177676 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.177776 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.177813 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fj2zg\" (UniqueName: \"kubernetes.io/projected/54204c3b-38f8-4e55-a645-b8c60b762c89-kube-api-access-fj2zg\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.177953 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.178049 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.183064 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.183156 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.183716 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.184654 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.185146 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.199333 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.279287 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fj2zg\" (UniqueName: \"kubernetes.io/projected/54204c3b-38f8-4e55-a645-b8c60b762c89-kube-api-access-fj2zg\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.296162 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fj2zg\" (UniqueName: \"kubernetes.io/projected/54204c3b-38f8-4e55-a645-b8c60b762c89-kube-api-access-fj2zg\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.386074 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.466717 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:43:38 crc kubenswrapper[4760]: E1124 17:43:38.467049 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.898445 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q"] Nov 24 17:43:38 crc kubenswrapper[4760]: W1124 17:43:38.901613 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54204c3b_38f8_4e55_a645_b8c60b762c89.slice/crio-fa72a6381b92149b2b85daaa05114fc2d4769b62c03bfc547dfe8d09819c481a WatchSource:0}: Error finding container fa72a6381b92149b2b85daaa05114fc2d4769b62c03bfc547dfe8d09819c481a: Status 404 returned error can't find the container with id fa72a6381b92149b2b85daaa05114fc2d4769b62c03bfc547dfe8d09819c481a Nov 24 17:43:38 crc kubenswrapper[4760]: I1124 17:43:38.972889 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" event={"ID":"54204c3b-38f8-4e55-a645-b8c60b762c89","Type":"ContainerStarted","Data":"fa72a6381b92149b2b85daaa05114fc2d4769b62c03bfc547dfe8d09819c481a"} Nov 24 17:43:39 crc kubenswrapper[4760]: I1124 17:43:39.986680 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" event={"ID":"54204c3b-38f8-4e55-a645-b8c60b762c89","Type":"ContainerStarted","Data":"e37ab84dd2f8267dd9378874ce5f07d86e5d838a9093d9774512a16298473725"} Nov 24 17:43:40 crc kubenswrapper[4760]: I1124 17:43:40.015819 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" podStartSLOduration=1.529268741 podStartE2EDuration="2.01579071s" podCreationTimestamp="2025-11-24 17:43:38 +0000 UTC" firstStartedPulling="2025-11-24 17:43:38.903905928 +0000 UTC m=+2414.226787488" lastFinishedPulling="2025-11-24 17:43:39.390427907 +0000 UTC m=+2414.713309457" observedRunningTime="2025-11-24 17:43:40.002950022 +0000 UTC m=+2415.325831572" watchObservedRunningTime="2025-11-24 17:43:40.01579071 +0000 UTC m=+2415.338672250" Nov 24 17:43:53 crc kubenswrapper[4760]: I1124 17:43:53.467311 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:43:53 crc kubenswrapper[4760]: E1124 17:43:53.468458 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:44:08 crc kubenswrapper[4760]: I1124 17:44:08.466272 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:44:08 crc kubenswrapper[4760]: E1124 17:44:08.467159 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:44:20 crc kubenswrapper[4760]: I1124 17:44:20.466444 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:44:20 crc kubenswrapper[4760]: E1124 17:44:20.467482 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:44:32 crc kubenswrapper[4760]: I1124 17:44:32.466986 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:44:32 crc kubenswrapper[4760]: E1124 17:44:32.467882 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:44:46 crc kubenswrapper[4760]: I1124 17:44:46.465860 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:44:46 crc kubenswrapper[4760]: E1124 17:44:46.466707 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:44:58 crc kubenswrapper[4760]: I1124 17:44:58.467046 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:44:58 crc kubenswrapper[4760]: E1124 17:44:58.469149 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" 
podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.151198 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg"] Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.153443 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.155734 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.161232 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.162675 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg"] Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.337690 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkhqc\" (UniqueName: \"kubernetes.io/projected/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-kube-api-access-lkhqc\") pod \"collect-profiles-29400105-slbfg\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.337979 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-secret-volume\") pod \"collect-profiles-29400105-slbfg\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.338071 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-config-volume\") pod \"collect-profiles-29400105-slbfg\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.439604 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-secret-volume\") pod \"collect-profiles-29400105-slbfg\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.439680 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-config-volume\") pod \"collect-profiles-29400105-slbfg\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.439732 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkhqc\" (UniqueName: \"kubernetes.io/projected/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-kube-api-access-lkhqc\") pod \"collect-profiles-29400105-slbfg\" (UID: 
\"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.440699 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-config-volume\") pod \"collect-profiles-29400105-slbfg\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.445131 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-secret-volume\") pod \"collect-profiles-29400105-slbfg\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.459224 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkhqc\" (UniqueName: \"kubernetes.io/projected/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-kube-api-access-lkhqc\") pod \"collect-profiles-29400105-slbfg\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.475074 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:00 crc kubenswrapper[4760]: I1124 17:45:00.925669 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg"] Nov 24 17:45:01 crc kubenswrapper[4760]: I1124 17:45:01.704171 4760 generic.go:334] "Generic (PLEG): container finished" podID="f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7" containerID="d1ba8e764a27927c8028b17eaa32adf90c2be492d8079565b61f2432a893ec19" exitCode=0 Nov 24 17:45:01 crc kubenswrapper[4760]: I1124 17:45:01.704267 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" event={"ID":"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7","Type":"ContainerDied","Data":"d1ba8e764a27927c8028b17eaa32adf90c2be492d8079565b61f2432a893ec19"} Nov 24 17:45:01 crc kubenswrapper[4760]: I1124 17:45:01.704308 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" event={"ID":"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7","Type":"ContainerStarted","Data":"5a847309a7a252fbc525e18d540abd0603e46a0cfcca7fb6327882b2fe1c30b9"} Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.027923 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.189330 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkhqc\" (UniqueName: \"kubernetes.io/projected/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-kube-api-access-lkhqc\") pod \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.189505 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-secret-volume\") pod \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.189539 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-config-volume\") pod \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\" (UID: \"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7\") " Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.190659 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-config-volume" (OuterVolumeSpecName: "config-volume") pod "f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7" (UID: "f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.195669 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7" (UID: "f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.196299 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-kube-api-access-lkhqc" (OuterVolumeSpecName: "kube-api-access-lkhqc") pod "f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7" (UID: "f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7"). InnerVolumeSpecName "kube-api-access-lkhqc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.291658 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkhqc\" (UniqueName: \"kubernetes.io/projected/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-kube-api-access-lkhqc\") on node \"crc\" DevicePath \"\"" Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.291693 4760 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.291703 4760 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.723812 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" event={"ID":"f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7","Type":"ContainerDied","Data":"5a847309a7a252fbc525e18d540abd0603e46a0cfcca7fb6327882b2fe1c30b9"} Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.723854 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a847309a7a252fbc525e18d540abd0603e46a0cfcca7fb6327882b2fe1c30b9" Nov 24 17:45:03 crc kubenswrapper[4760]: I1124 17:45:03.723908 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400105-slbfg" Nov 24 17:45:04 crc kubenswrapper[4760]: I1124 17:45:04.108881 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"] Nov 24 17:45:04 crc kubenswrapper[4760]: I1124 17:45:04.120867 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400060-h5bsv"] Nov 24 17:45:05 crc kubenswrapper[4760]: I1124 17:45:05.476770 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="893b8ae8-4ab4-474e-b6bc-ed926c279c44" path="/var/lib/kubelet/pods/893b8ae8-4ab4-474e-b6bc-ed926c279c44/volumes" Nov 24 17:45:13 crc kubenswrapper[4760]: I1124 17:45:13.466573 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:45:13 crc kubenswrapper[4760]: E1124 17:45:13.468347 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:45:24 crc kubenswrapper[4760]: I1124 17:45:24.466575 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:45:24 crc kubenswrapper[4760]: E1124 17:45:24.467372 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:45:33 crc kubenswrapper[4760]: I1124 17:45:33.681279 4760 scope.go:117] "RemoveContainer" containerID="d8ca9cc287671f1004fe493e824a3a6bf0612ee8b572b7528a7a9621dee80e2d" Nov 24 17:45:38 crc kubenswrapper[4760]: I1124 17:45:38.466097 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:45:38 crc kubenswrapper[4760]: E1124 17:45:38.467092 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:45:49 crc kubenswrapper[4760]: I1124 17:45:49.466518 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:45:49 crc kubenswrapper[4760]: E1124 17:45:49.467534 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:45:52 crc kubenswrapper[4760]: I1124 17:45:52.181447 4760 generic.go:334] "Generic (PLEG): container finished" podID="54204c3b-38f8-4e55-a645-b8c60b762c89" containerID="e37ab84dd2f8267dd9378874ce5f07d86e5d838a9093d9774512a16298473725" exitCode=0 Nov 24 17:45:52 crc kubenswrapper[4760]: I1124 17:45:52.182130 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" event={"ID":"54204c3b-38f8-4e55-a645-b8c60b762c89","Type":"ContainerDied","Data":"e37ab84dd2f8267dd9378874ce5f07d86e5d838a9093d9774512a16298473725"} Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.604308 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.754842 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ssh-key\") pod \"54204c3b-38f8-4e55-a645-b8c60b762c89\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.754904 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-0\") pod \"54204c3b-38f8-4e55-a645-b8c60b762c89\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.754976 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-1\") pod \"54204c3b-38f8-4e55-a645-b8c60b762c89\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.755014 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-telemetry-combined-ca-bundle\") pod \"54204c3b-38f8-4e55-a645-b8c60b762c89\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.755165 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-inventory\") pod \"54204c3b-38f8-4e55-a645-b8c60b762c89\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.755208 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-2\") pod \"54204c3b-38f8-4e55-a645-b8c60b762c89\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.755248 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fj2zg\" (UniqueName: \"kubernetes.io/projected/54204c3b-38f8-4e55-a645-b8c60b762c89-kube-api-access-fj2zg\") pod \"54204c3b-38f8-4e55-a645-b8c60b762c89\" (UID: \"54204c3b-38f8-4e55-a645-b8c60b762c89\") " Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.761663 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "54204c3b-38f8-4e55-a645-b8c60b762c89" (UID: "54204c3b-38f8-4e55-a645-b8c60b762c89"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.761673 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54204c3b-38f8-4e55-a645-b8c60b762c89-kube-api-access-fj2zg" (OuterVolumeSpecName: "kube-api-access-fj2zg") pod "54204c3b-38f8-4e55-a645-b8c60b762c89" (UID: "54204c3b-38f8-4e55-a645-b8c60b762c89"). 
InnerVolumeSpecName "kube-api-access-fj2zg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.783877 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "54204c3b-38f8-4e55-a645-b8c60b762c89" (UID: "54204c3b-38f8-4e55-a645-b8c60b762c89"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.784936 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "54204c3b-38f8-4e55-a645-b8c60b762c89" (UID: "54204c3b-38f8-4e55-a645-b8c60b762c89"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.791068 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-inventory" (OuterVolumeSpecName: "inventory") pod "54204c3b-38f8-4e55-a645-b8c60b762c89" (UID: "54204c3b-38f8-4e55-a645-b8c60b762c89"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.791543 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "54204c3b-38f8-4e55-a645-b8c60b762c89" (UID: "54204c3b-38f8-4e55-a645-b8c60b762c89"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.792026 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "54204c3b-38f8-4e55-a645-b8c60b762c89" (UID: "54204c3b-38f8-4e55-a645-b8c60b762c89"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.857660 4760 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.857697 4760 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.857710 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fj2zg\" (UniqueName: \"kubernetes.io/projected/54204c3b-38f8-4e55-a645-b8c60b762c89-kube-api-access-fj2zg\") on node \"crc\" DevicePath \"\"" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.857720 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.857729 4760 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.857739 4760 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 24 17:45:53 crc kubenswrapper[4760]: I1124 17:45:53.857748 4760 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54204c3b-38f8-4e55-a645-b8c60b762c89-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 17:45:54 crc kubenswrapper[4760]: I1124 17:45:54.199059 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" event={"ID":"54204c3b-38f8-4e55-a645-b8c60b762c89","Type":"ContainerDied","Data":"fa72a6381b92149b2b85daaa05114fc2d4769b62c03bfc547dfe8d09819c481a"} Nov 24 17:45:54 crc kubenswrapper[4760]: I1124 17:45:54.199407 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa72a6381b92149b2b85daaa05114fc2d4769b62c03bfc547dfe8d09819c481a" Nov 24 17:45:54 crc kubenswrapper[4760]: I1124 17:45:54.199113 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q" Nov 24 17:46:02 crc kubenswrapper[4760]: I1124 17:46:02.467322 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:46:02 crc kubenswrapper[4760]: E1124 17:46:02.468130 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:46:14 crc kubenswrapper[4760]: I1124 17:46:14.466963 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:46:14 crc kubenswrapper[4760]: E1124 17:46:14.467713 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:46:21 crc kubenswrapper[4760]: E1124 17:46:21.770351 4760 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.230:51464->38.102.83.230:45313: write tcp 38.102.83.230:51464->38.102.83.230:45313: write: broken pipe Nov 24 17:46:27 crc kubenswrapper[4760]: I1124 17:46:27.470566 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:46:27 crc kubenswrapper[4760]: E1124 17:46:27.471308 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:46:38 crc kubenswrapper[4760]: I1124 17:46:38.467924 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:46:39 crc kubenswrapper[4760]: I1124 17:46:39.592946 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"655d062a73e4e4c9d1e66c56f8dd43ba60f89b20b2da8c3a9f553aaf836cb8d7"} Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.788830 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 24 17:46:44 crc kubenswrapper[4760]: E1124 17:46:44.789652 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7" containerName="collect-profiles" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.789668 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7" containerName="collect-profiles" Nov 24 17:46:44 crc kubenswrapper[4760]: E1124 17:46:44.789683 4760 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="54204c3b-38f8-4e55-a645-b8c60b762c89" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.789692 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="54204c3b-38f8-4e55-a645-b8c60b762c89" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.789892 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="f11849e9-b8b9-48bc-b3e1-c3ec8c2303a7" containerName="collect-profiles" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.789931 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="54204c3b-38f8-4e55-a645-b8c60b762c89" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.790623 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.792471 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-2v97f" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.792471 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.792945 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.793069 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.799133 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.823971 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-config-data\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.824061 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.824094 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.824122 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.824180 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.824236 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rfx4\" (UniqueName: \"kubernetes.io/projected/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-kube-api-access-8rfx4\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.824333 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.824499 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.824615 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.926779 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-config-data\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.926849 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.927249 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.927198 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.928126 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-config-data\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.928171 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.931116 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.931189 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.931261 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rfx4\" (UniqueName: \"kubernetes.io/projected/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-kube-api-access-8rfx4\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.931334 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.931484 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.931612 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.932968 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.933627 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config\") pod 
\"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.939194 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.944772 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.945167 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.952573 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rfx4\" (UniqueName: \"kubernetes.io/projected/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-kube-api-access-8rfx4\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:44 crc kubenswrapper[4760]: I1124 17:46:44.961068 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " pod="openstack/tempest-tests-tempest" Nov 24 17:46:45 crc kubenswrapper[4760]: I1124 17:46:45.108318 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 24 17:46:45 crc kubenswrapper[4760]: I1124 17:46:45.533545 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 24 17:46:45 crc kubenswrapper[4760]: I1124 17:46:45.547699 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:46:45 crc kubenswrapper[4760]: I1124 17:46:45.640248 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01","Type":"ContainerStarted","Data":"dc3c73b1b1e791afd9e0143e6120fa7d0fec03917a9c81654242aea2e0c51e1d"} Nov 24 17:47:18 crc kubenswrapper[4760]: E1124 17:47:18.772276 4760 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 24 17:47:18 crc kubenswrapper[4760]: E1124 17:47:18.774236 4760 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8rfx4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,Stdi
nOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 17:47:18 crc kubenswrapper[4760]: E1124 17:47:18.776017 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" Nov 24 17:47:19 crc kubenswrapper[4760]: E1124 17:47:19.009978 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" Nov 24 17:47:32 crc kubenswrapper[4760]: I1124 17:47:32.025347 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 24 17:47:33 crc kubenswrapper[4760]: I1124 17:47:33.130064 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01","Type":"ContainerStarted","Data":"91bfb00a96f6b1aeb02d92fc57d0f8f342c5df5a22e7041d9b8bfa404a038fd0"} Nov 24 17:47:33 crc kubenswrapper[4760]: I1124 17:47:33.148367 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.674211042 podStartE2EDuration="50.148350231s" podCreationTimestamp="2025-11-24 17:46:43 +0000 UTC" firstStartedPulling="2025-11-24 17:46:45.547421481 +0000 UTC m=+2600.870303031" lastFinishedPulling="2025-11-24 17:47:32.02156067 +0000 UTC m=+2647.344442220" observedRunningTime="2025-11-24 17:47:33.145478449 +0000 UTC m=+2648.468359999" watchObservedRunningTime="2025-11-24 17:47:33.148350231 +0000 UTC m=+2648.471231781" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.149462 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ljl9v"] Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.152163 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.181864 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ljl9v"] Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.262553 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llxm5\" (UniqueName: \"kubernetes.io/projected/cba44a1a-6be1-4c22-81d4-94e9f3255642-kube-api-access-llxm5\") pod \"redhat-operators-ljl9v\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") " pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.262809 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-utilities\") pod \"redhat-operators-ljl9v\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") " pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.262912 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-catalog-content\") pod \"redhat-operators-ljl9v\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") " pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.364901 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llxm5\" (UniqueName: \"kubernetes.io/projected/cba44a1a-6be1-4c22-81d4-94e9f3255642-kube-api-access-llxm5\") pod \"redhat-operators-ljl9v\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") " pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.365045 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-utilities\") pod \"redhat-operators-ljl9v\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") " pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.365301 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-catalog-content\") pod \"redhat-operators-ljl9v\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") " pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.365736 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-utilities\") pod \"redhat-operators-ljl9v\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") " pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.365897 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-catalog-content\") pod \"redhat-operators-ljl9v\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") " pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.386522 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-llxm5\" (UniqueName: \"kubernetes.io/projected/cba44a1a-6be1-4c22-81d4-94e9f3255642-kube-api-access-llxm5\") pod \"redhat-operators-ljl9v\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") " pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:48 crc kubenswrapper[4760]: I1124 17:48:48.480801 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:48:49 crc kubenswrapper[4760]: I1124 17:48:49.075452 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ljl9v"] Nov 24 17:48:49 crc kubenswrapper[4760]: I1124 17:48:49.790680 4760 generic.go:334] "Generic (PLEG): container finished" podID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerID="292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e" exitCode=0 Nov 24 17:48:49 crc kubenswrapper[4760]: I1124 17:48:49.790739 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl9v" event={"ID":"cba44a1a-6be1-4c22-81d4-94e9f3255642","Type":"ContainerDied","Data":"292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e"} Nov 24 17:48:49 crc kubenswrapper[4760]: I1124 17:48:49.790975 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl9v" event={"ID":"cba44a1a-6be1-4c22-81d4-94e9f3255642","Type":"ContainerStarted","Data":"0cf418be440eed46c628790d30a1196157bb2891cad6dda1ece03a39a90f5c4c"} Nov 24 17:48:50 crc kubenswrapper[4760]: I1124 17:48:50.802212 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl9v" event={"ID":"cba44a1a-6be1-4c22-81d4-94e9f3255642","Type":"ContainerStarted","Data":"8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008"} Nov 24 17:48:52 crc kubenswrapper[4760]: I1124 17:48:52.826978 4760 generic.go:334] "Generic (PLEG): container finished" podID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerID="8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008" exitCode=0 Nov 24 17:48:52 crc kubenswrapper[4760]: I1124 17:48:52.827045 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl9v" event={"ID":"cba44a1a-6be1-4c22-81d4-94e9f3255642","Type":"ContainerDied","Data":"8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008"} Nov 24 17:48:54 crc kubenswrapper[4760]: I1124 17:48:54.846526 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl9v" event={"ID":"cba44a1a-6be1-4c22-81d4-94e9f3255642","Type":"ContainerStarted","Data":"556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19"} Nov 24 17:48:54 crc kubenswrapper[4760]: I1124 17:48:54.866524 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ljl9v" podStartSLOduration=2.969325243 podStartE2EDuration="6.866504653s" podCreationTimestamp="2025-11-24 17:48:48 +0000 UTC" firstStartedPulling="2025-11-24 17:48:49.792445372 +0000 UTC m=+2725.115326922" lastFinishedPulling="2025-11-24 17:48:53.689624782 +0000 UTC m=+2729.012506332" observedRunningTime="2025-11-24 17:48:54.863161797 +0000 UTC m=+2730.186043347" watchObservedRunningTime="2025-11-24 17:48:54.866504653 +0000 UTC m=+2730.189386203" Nov 24 17:48:58 crc kubenswrapper[4760]: I1124 17:48:58.481164 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ljl9v" 
Nov 24 17:48:58 crc kubenswrapper[4760]: I1124 17:48:58.481730 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ljl9v"
Nov 24 17:48:58 crc kubenswrapper[4760]: I1124 17:48:58.537982 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ljl9v"
Nov 24 17:48:58 crc kubenswrapper[4760]: I1124 17:48:58.924656 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ljl9v"
Nov 24 17:48:58 crc kubenswrapper[4760]: I1124 17:48:58.973226 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ljl9v"]
Nov 24 17:49:00 crc kubenswrapper[4760]: I1124 17:49:00.896147 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ljl9v" podUID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerName="registry-server" containerID="cri-o://556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19" gracePeriod=2
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.371770 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ljl9v"
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.420484 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-catalog-content\") pod \"cba44a1a-6be1-4c22-81d4-94e9f3255642\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") "
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.420735 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llxm5\" (UniqueName: \"kubernetes.io/projected/cba44a1a-6be1-4c22-81d4-94e9f3255642-kube-api-access-llxm5\") pod \"cba44a1a-6be1-4c22-81d4-94e9f3255642\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") "
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.420801 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-utilities\") pod \"cba44a1a-6be1-4c22-81d4-94e9f3255642\" (UID: \"cba44a1a-6be1-4c22-81d4-94e9f3255642\") "
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.421853 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-utilities" (OuterVolumeSpecName: "utilities") pod "cba44a1a-6be1-4c22-81d4-94e9f3255642" (UID: "cba44a1a-6be1-4c22-81d4-94e9f3255642"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.450688 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cba44a1a-6be1-4c22-81d4-94e9f3255642-kube-api-access-llxm5" (OuterVolumeSpecName: "kube-api-access-llxm5") pod "cba44a1a-6be1-4c22-81d4-94e9f3255642" (UID: "cba44a1a-6be1-4c22-81d4-94e9f3255642"). InnerVolumeSpecName "kube-api-access-llxm5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.514780 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cba44a1a-6be1-4c22-81d4-94e9f3255642" (UID: "cba44a1a-6be1-4c22-81d4-94e9f3255642"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.522748 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llxm5\" (UniqueName: \"kubernetes.io/projected/cba44a1a-6be1-4c22-81d4-94e9f3255642-kube-api-access-llxm5\") on node \"crc\" DevicePath \"\""
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.523326 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.523337 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba44a1a-6be1-4c22-81d4-94e9f3255642-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.905712 4760 generic.go:334] "Generic (PLEG): container finished" podID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerID="556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19" exitCode=0
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.905756 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl9v" event={"ID":"cba44a1a-6be1-4c22-81d4-94e9f3255642","Type":"ContainerDied","Data":"556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19"}
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.905783 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl9v" event={"ID":"cba44a1a-6be1-4c22-81d4-94e9f3255642","Type":"ContainerDied","Data":"0cf418be440eed46c628790d30a1196157bb2891cad6dda1ece03a39a90f5c4c"}
Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.905760 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ljl9v"
Need to start a new one" pod="openshift-marketplace/redhat-operators-ljl9v" Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.905800 4760 scope.go:117] "RemoveContainer" containerID="556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19" Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.935996 4760 scope.go:117] "RemoveContainer" containerID="8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008" Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.952371 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ljl9v"] Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.959985 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ljl9v"] Nov 24 17:49:01 crc kubenswrapper[4760]: I1124 17:49:01.961930 4760 scope.go:117] "RemoveContainer" containerID="292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e" Nov 24 17:49:02 crc kubenswrapper[4760]: I1124 17:49:02.009863 4760 scope.go:117] "RemoveContainer" containerID="556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19" Nov 24 17:49:02 crc kubenswrapper[4760]: E1124 17:49:02.010272 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19\": container with ID starting with 556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19 not found: ID does not exist" containerID="556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19" Nov 24 17:49:02 crc kubenswrapper[4760]: I1124 17:49:02.010302 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19"} err="failed to get container status \"556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19\": rpc error: code = NotFound desc = could not find container \"556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19\": container with ID starting with 556f2b3cf237edcc4f582f4b6ddd5179c855d2cc8a11a5a5ce036dbd960d8b19 not found: ID does not exist" Nov 24 17:49:02 crc kubenswrapper[4760]: I1124 17:49:02.010335 4760 scope.go:117] "RemoveContainer" containerID="8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008" Nov 24 17:49:02 crc kubenswrapper[4760]: E1124 17:49:02.010654 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008\": container with ID starting with 8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008 not found: ID does not exist" containerID="8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008" Nov 24 17:49:02 crc kubenswrapper[4760]: I1124 17:49:02.010716 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008"} err="failed to get container status \"8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008\": rpc error: code = NotFound desc = could not find container \"8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008\": container with ID starting with 8a0731a6f1921d60a7c530156e91db0b7685c7d5023f66a46b664c63ba257008 not found: ID does not exist" Nov 24 17:49:02 crc kubenswrapper[4760]: I1124 17:49:02.010753 4760 scope.go:117] "RemoveContainer" 
containerID="292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e" Nov 24 17:49:02 crc kubenswrapper[4760]: E1124 17:49:02.011141 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e\": container with ID starting with 292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e not found: ID does not exist" containerID="292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e" Nov 24 17:49:02 crc kubenswrapper[4760]: I1124 17:49:02.011207 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e"} err="failed to get container status \"292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e\": rpc error: code = NotFound desc = could not find container \"292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e\": container with ID starting with 292fddf158538601615fb14aa8579cfa00497549e4c58136c0d6461ac44c002e not found: ID does not exist" Nov 24 17:49:03 crc kubenswrapper[4760]: I1124 17:49:03.476742 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cba44a1a-6be1-4c22-81d4-94e9f3255642" path="/var/lib/kubelet/pods/cba44a1a-6be1-4c22-81d4-94e9f3255642/volumes" Nov 24 17:49:05 crc kubenswrapper[4760]: I1124 17:49:05.642589 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:49:05 crc kubenswrapper[4760]: I1124 17:49:05.643149 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:49:35 crc kubenswrapper[4760]: I1124 17:49:35.643202 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:49:35 crc kubenswrapper[4760]: I1124 17:49:35.643812 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:50:05 crc kubenswrapper[4760]: I1124 17:50:05.642840 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:50:05 crc kubenswrapper[4760]: I1124 17:50:05.644851 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:50:05 crc kubenswrapper[4760]: I1124 17:50:05.644989 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:50:05 crc kubenswrapper[4760]: I1124 17:50:05.645761 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"655d062a73e4e4c9d1e66c56f8dd43ba60f89b20b2da8c3a9f553aaf836cb8d7"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:50:05 crc kubenswrapper[4760]: I1124 17:50:05.645887 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://655d062a73e4e4c9d1e66c56f8dd43ba60f89b20b2da8c3a9f553aaf836cb8d7" gracePeriod=600 Nov 24 17:50:06 crc kubenswrapper[4760]: I1124 17:50:06.534957 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="655d062a73e4e4c9d1e66c56f8dd43ba60f89b20b2da8c3a9f553aaf836cb8d7" exitCode=0 Nov 24 17:50:06 crc kubenswrapper[4760]: I1124 17:50:06.535061 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"655d062a73e4e4c9d1e66c56f8dd43ba60f89b20b2da8c3a9f553aaf836cb8d7"} Nov 24 17:50:06 crc kubenswrapper[4760]: I1124 17:50:06.535640 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f"} Nov 24 17:50:06 crc kubenswrapper[4760]: I1124 17:50:06.535671 4760 scope.go:117] "RemoveContainer" containerID="ce209f6c367404d532b5434fce7acb8fdbe777d76c985217cc659a859fcd3ae2" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.216674 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zhkdc"] Nov 24 17:50:51 crc kubenswrapper[4760]: E1124 17:50:51.218379 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerName="registry-server" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.218462 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerName="registry-server" Nov 24 17:50:51 crc kubenswrapper[4760]: E1124 17:50:51.218547 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerName="extract-utilities" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.218607 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerName="extract-utilities" Nov 24 17:50:51 crc kubenswrapper[4760]: E1124 17:50:51.218670 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerName="extract-content" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.218726 4760 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerName="extract-content" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.218983 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="cba44a1a-6be1-4c22-81d4-94e9f3255642" containerName="registry-server" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.220373 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.235805 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zhkdc"] Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.296297 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-utilities\") pod \"community-operators-zhkdc\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.296371 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltfgf\" (UniqueName: \"kubernetes.io/projected/d2278864-62c3-4703-afb3-35af08b61448-kube-api-access-ltfgf\") pod \"community-operators-zhkdc\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.296596 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-catalog-content\") pod \"community-operators-zhkdc\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.398611 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-catalog-content\") pod \"community-operators-zhkdc\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.398731 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-utilities\") pod \"community-operators-zhkdc\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.398790 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltfgf\" (UniqueName: \"kubernetes.io/projected/d2278864-62c3-4703-afb3-35af08b61448-kube-api-access-ltfgf\") pod \"community-operators-zhkdc\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.399276 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-catalog-content\") pod \"community-operators-zhkdc\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.399290 4760 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-utilities\") pod \"community-operators-zhkdc\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.431867 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltfgf\" (UniqueName: \"kubernetes.io/projected/d2278864-62c3-4703-afb3-35af08b61448-kube-api-access-ltfgf\") pod \"community-operators-zhkdc\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.541638 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:50:51 crc kubenswrapper[4760]: I1124 17:50:51.910403 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zhkdc"] Nov 24 17:50:52 crc kubenswrapper[4760]: I1124 17:50:52.209337 4760 generic.go:334] "Generic (PLEG): container finished" podID="d2278864-62c3-4703-afb3-35af08b61448" containerID="8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9" exitCode=0 Nov 24 17:50:52 crc kubenswrapper[4760]: I1124 17:50:52.209385 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhkdc" event={"ID":"d2278864-62c3-4703-afb3-35af08b61448","Type":"ContainerDied","Data":"8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9"} Nov 24 17:50:52 crc kubenswrapper[4760]: I1124 17:50:52.209416 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhkdc" event={"ID":"d2278864-62c3-4703-afb3-35af08b61448","Type":"ContainerStarted","Data":"465f4883120689e4a10f4d80263686772a41a34d341f484ba8cd0289d6bc9de6"} Nov 24 17:50:53 crc kubenswrapper[4760]: I1124 17:50:53.219833 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhkdc" event={"ID":"d2278864-62c3-4703-afb3-35af08b61448","Type":"ContainerStarted","Data":"676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c"} Nov 24 17:50:54 crc kubenswrapper[4760]: I1124 17:50:54.229998 4760 generic.go:334] "Generic (PLEG): container finished" podID="d2278864-62c3-4703-afb3-35af08b61448" containerID="676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c" exitCode=0 Nov 24 17:50:54 crc kubenswrapper[4760]: I1124 17:50:54.230037 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhkdc" event={"ID":"d2278864-62c3-4703-afb3-35af08b61448","Type":"ContainerDied","Data":"676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c"} Nov 24 17:50:55 crc kubenswrapper[4760]: I1124 17:50:55.240269 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhkdc" event={"ID":"d2278864-62c3-4703-afb3-35af08b61448","Type":"ContainerStarted","Data":"8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f"} Nov 24 17:50:55 crc kubenswrapper[4760]: I1124 17:50:55.267186 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zhkdc" podStartSLOduration=1.8476344089999999 podStartE2EDuration="4.267160786s" podCreationTimestamp="2025-11-24 17:50:51 +0000 UTC" firstStartedPulling="2025-11-24 
17:50:52.211393375 +0000 UTC m=+2847.534274925" lastFinishedPulling="2025-11-24 17:50:54.630919752 +0000 UTC m=+2849.953801302" observedRunningTime="2025-11-24 17:50:55.258350725 +0000 UTC m=+2850.581232275" watchObservedRunningTime="2025-11-24 17:50:55.267160786 +0000 UTC m=+2850.590042346" Nov 24 17:51:01 crc kubenswrapper[4760]: I1124 17:51:01.542581 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:51:01 crc kubenswrapper[4760]: I1124 17:51:01.543181 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:51:01 crc kubenswrapper[4760]: I1124 17:51:01.594623 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:51:02 crc kubenswrapper[4760]: I1124 17:51:02.353552 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:51:02 crc kubenswrapper[4760]: I1124 17:51:02.401777 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zhkdc"] Nov 24 17:51:04 crc kubenswrapper[4760]: I1124 17:51:04.324170 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zhkdc" podUID="d2278864-62c3-4703-afb3-35af08b61448" containerName="registry-server" containerID="cri-o://8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f" gracePeriod=2 Nov 24 17:51:04 crc kubenswrapper[4760]: I1124 17:51:04.884175 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:51:04 crc kubenswrapper[4760]: I1124 17:51:04.968428 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ltfgf\" (UniqueName: \"kubernetes.io/projected/d2278864-62c3-4703-afb3-35af08b61448-kube-api-access-ltfgf\") pod \"d2278864-62c3-4703-afb3-35af08b61448\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " Nov 24 17:51:04 crc kubenswrapper[4760]: I1124 17:51:04.968758 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-utilities\") pod \"d2278864-62c3-4703-afb3-35af08b61448\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " Nov 24 17:51:04 crc kubenswrapper[4760]: I1124 17:51:04.968984 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-catalog-content\") pod \"d2278864-62c3-4703-afb3-35af08b61448\" (UID: \"d2278864-62c3-4703-afb3-35af08b61448\") " Nov 24 17:51:04 crc kubenswrapper[4760]: I1124 17:51:04.969695 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-utilities" (OuterVolumeSpecName: "utilities") pod "d2278864-62c3-4703-afb3-35af08b61448" (UID: "d2278864-62c3-4703-afb3-35af08b61448"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:51:04 crc kubenswrapper[4760]: I1124 17:51:04.976350 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2278864-62c3-4703-afb3-35af08b61448-kube-api-access-ltfgf" (OuterVolumeSpecName: "kube-api-access-ltfgf") pod "d2278864-62c3-4703-afb3-35af08b61448" (UID: "d2278864-62c3-4703-afb3-35af08b61448"). InnerVolumeSpecName "kube-api-access-ltfgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.023210 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d2278864-62c3-4703-afb3-35af08b61448" (UID: "d2278864-62c3-4703-afb3-35af08b61448"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.072777 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ltfgf\" (UniqueName: \"kubernetes.io/projected/d2278864-62c3-4703-afb3-35af08b61448-kube-api-access-ltfgf\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.072827 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.072867 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2278864-62c3-4703-afb3-35af08b61448-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.333544 4760 generic.go:334] "Generic (PLEG): container finished" podID="d2278864-62c3-4703-afb3-35af08b61448" containerID="8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f" exitCode=0 Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.333593 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhkdc" event={"ID":"d2278864-62c3-4703-afb3-35af08b61448","Type":"ContainerDied","Data":"8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f"} Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.333617 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zhkdc" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.334276 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhkdc" event={"ID":"d2278864-62c3-4703-afb3-35af08b61448","Type":"ContainerDied","Data":"465f4883120689e4a10f4d80263686772a41a34d341f484ba8cd0289d6bc9de6"} Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.334307 4760 scope.go:117] "RemoveContainer" containerID="8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.353980 4760 scope.go:117] "RemoveContainer" containerID="676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.376298 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zhkdc"] Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.386898 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zhkdc"] Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.395506 4760 scope.go:117] "RemoveContainer" containerID="8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.435144 4760 scope.go:117] "RemoveContainer" containerID="8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f" Nov 24 17:51:05 crc kubenswrapper[4760]: E1124 17:51:05.435760 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f\": container with ID starting with 8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f not found: ID does not exist" containerID="8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.435810 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f"} err="failed to get container status \"8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f\": rpc error: code = NotFound desc = could not find container \"8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f\": container with ID starting with 8898b706e27c30b3889ed355ff25457c5e877a9de02d601ded2f4f3ad48af73f not found: ID does not exist" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.435841 4760 scope.go:117] "RemoveContainer" containerID="676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c" Nov 24 17:51:05 crc kubenswrapper[4760]: E1124 17:51:05.436260 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c\": container with ID starting with 676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c not found: ID does not exist" containerID="676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.436308 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c"} err="failed to get container status \"676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c\": rpc error: code = NotFound desc = could not find 
container \"676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c\": container with ID starting with 676d10f8b870be2b81edb0d8a13e088afa8b81d1b3cd4afd564c90dde1f9187c not found: ID does not exist" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.436335 4760 scope.go:117] "RemoveContainer" containerID="8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9" Nov 24 17:51:05 crc kubenswrapper[4760]: E1124 17:51:05.436676 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9\": container with ID starting with 8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9 not found: ID does not exist" containerID="8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.436710 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9"} err="failed to get container status \"8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9\": rpc error: code = NotFound desc = could not find container \"8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9\": container with ID starting with 8c385576bf436c5ee5d444f0ec272646471e729a76b61875a06822994d71c8c9 not found: ID does not exist" Nov 24 17:51:05 crc kubenswrapper[4760]: I1124 17:51:05.479359 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2278864-62c3-4703-afb3-35af08b61448" path="/var/lib/kubelet/pods/d2278864-62c3-4703-afb3-35af08b61448/volumes" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.391885 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8jn6b"] Nov 24 17:51:29 crc kubenswrapper[4760]: E1124 17:51:29.393245 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2278864-62c3-4703-afb3-35af08b61448" containerName="extract-content" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.393306 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2278864-62c3-4703-afb3-35af08b61448" containerName="extract-content" Nov 24 17:51:29 crc kubenswrapper[4760]: E1124 17:51:29.393549 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2278864-62c3-4703-afb3-35af08b61448" containerName="registry-server" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.393558 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2278864-62c3-4703-afb3-35af08b61448" containerName="registry-server" Nov 24 17:51:29 crc kubenswrapper[4760]: E1124 17:51:29.393617 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2278864-62c3-4703-afb3-35af08b61448" containerName="extract-utilities" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.393628 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2278864-62c3-4703-afb3-35af08b61448" containerName="extract-utilities" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.394150 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2278864-62c3-4703-afb3-35af08b61448" containerName="registry-server" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.398633 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.405292 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8jn6b"] Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.474926 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmg66\" (UniqueName: \"kubernetes.io/projected/8475486a-4961-4b4d-957f-ce64a1a2141e-kube-api-access-lmg66\") pod \"certified-operators-8jn6b\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.475136 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-utilities\") pod \"certified-operators-8jn6b\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.475197 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-catalog-content\") pod \"certified-operators-8jn6b\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.577248 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-catalog-content\") pod \"certified-operators-8jn6b\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.577392 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmg66\" (UniqueName: \"kubernetes.io/projected/8475486a-4961-4b4d-957f-ce64a1a2141e-kube-api-access-lmg66\") pod \"certified-operators-8jn6b\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.577520 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-utilities\") pod \"certified-operators-8jn6b\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.577891 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-catalog-content\") pod \"certified-operators-8jn6b\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.577997 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-utilities\") pod \"certified-operators-8jn6b\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.596449 4760 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lmg66\" (UniqueName: \"kubernetes.io/projected/8475486a-4961-4b4d-957f-ce64a1a2141e-kube-api-access-lmg66\") pod \"certified-operators-8jn6b\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:29 crc kubenswrapper[4760]: I1124 17:51:29.732279 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:30 crc kubenswrapper[4760]: I1124 17:51:30.265295 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8jn6b"] Nov 24 17:51:30 crc kubenswrapper[4760]: I1124 17:51:30.564403 4760 generic.go:334] "Generic (PLEG): container finished" podID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerID="fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f" exitCode=0 Nov 24 17:51:30 crc kubenswrapper[4760]: I1124 17:51:30.564587 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jn6b" event={"ID":"8475486a-4961-4b4d-957f-ce64a1a2141e","Type":"ContainerDied","Data":"fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f"} Nov 24 17:51:30 crc kubenswrapper[4760]: I1124 17:51:30.564675 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jn6b" event={"ID":"8475486a-4961-4b4d-957f-ce64a1a2141e","Type":"ContainerStarted","Data":"0818f90c3721605f750261e0fed03d2b24c9b02a1c64771eca10500986c23100"} Nov 24 17:51:31 crc kubenswrapper[4760]: I1124 17:51:31.576180 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jn6b" event={"ID":"8475486a-4961-4b4d-957f-ce64a1a2141e","Type":"ContainerStarted","Data":"6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b"} Nov 24 17:51:32 crc kubenswrapper[4760]: I1124 17:51:32.587549 4760 generic.go:334] "Generic (PLEG): container finished" podID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerID="6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b" exitCode=0 Nov 24 17:51:32 crc kubenswrapper[4760]: I1124 17:51:32.587686 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jn6b" event={"ID":"8475486a-4961-4b4d-957f-ce64a1a2141e","Type":"ContainerDied","Data":"6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b"} Nov 24 17:51:33 crc kubenswrapper[4760]: I1124 17:51:33.601004 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jn6b" event={"ID":"8475486a-4961-4b4d-957f-ce64a1a2141e","Type":"ContainerStarted","Data":"90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba"} Nov 24 17:51:33 crc kubenswrapper[4760]: I1124 17:51:33.619959 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8jn6b" podStartSLOduration=2.222633436 podStartE2EDuration="4.61993621s" podCreationTimestamp="2025-11-24 17:51:29 +0000 UTC" firstStartedPulling="2025-11-24 17:51:30.565836485 +0000 UTC m=+2885.888718035" lastFinishedPulling="2025-11-24 17:51:32.963139259 +0000 UTC m=+2888.286020809" observedRunningTime="2025-11-24 17:51:33.616275375 +0000 UTC m=+2888.939156955" watchObservedRunningTime="2025-11-24 17:51:33.61993621 +0000 UTC m=+2888.942817780" Nov 24 17:51:39 crc kubenswrapper[4760]: I1124 17:51:39.735122 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:39 crc kubenswrapper[4760]: I1124 17:51:39.735649 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:39 crc kubenswrapper[4760]: I1124 17:51:39.783706 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:40 crc kubenswrapper[4760]: I1124 17:51:40.730662 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:40 crc kubenswrapper[4760]: I1124 17:51:40.789372 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8jn6b"] Nov 24 17:51:42 crc kubenswrapper[4760]: I1124 17:51:42.697200 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8jn6b" podUID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerName="registry-server" containerID="cri-o://90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba" gracePeriod=2 Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.177808 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.352368 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-utilities\") pod \"8475486a-4961-4b4d-957f-ce64a1a2141e\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.352619 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmg66\" (UniqueName: \"kubernetes.io/projected/8475486a-4961-4b4d-957f-ce64a1a2141e-kube-api-access-lmg66\") pod \"8475486a-4961-4b4d-957f-ce64a1a2141e\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.352647 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-catalog-content\") pod \"8475486a-4961-4b4d-957f-ce64a1a2141e\" (UID: \"8475486a-4961-4b4d-957f-ce64a1a2141e\") " Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.353696 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-utilities" (OuterVolumeSpecName: "utilities") pod "8475486a-4961-4b4d-957f-ce64a1a2141e" (UID: "8475486a-4961-4b4d-957f-ce64a1a2141e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.360073 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8475486a-4961-4b4d-957f-ce64a1a2141e-kube-api-access-lmg66" (OuterVolumeSpecName: "kube-api-access-lmg66") pod "8475486a-4961-4b4d-957f-ce64a1a2141e" (UID: "8475486a-4961-4b4d-957f-ce64a1a2141e"). InnerVolumeSpecName "kube-api-access-lmg66". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.397972 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8475486a-4961-4b4d-957f-ce64a1a2141e" (UID: "8475486a-4961-4b4d-957f-ce64a1a2141e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.454640 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.454687 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmg66\" (UniqueName: \"kubernetes.io/projected/8475486a-4961-4b4d-957f-ce64a1a2141e-kube-api-access-lmg66\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.454702 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8475486a-4961-4b4d-957f-ce64a1a2141e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.708722 4760 generic.go:334] "Generic (PLEG): container finished" podID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerID="90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba" exitCode=0 Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.708774 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8jn6b" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.708791 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jn6b" event={"ID":"8475486a-4961-4b4d-957f-ce64a1a2141e","Type":"ContainerDied","Data":"90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba"} Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.710033 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jn6b" event={"ID":"8475486a-4961-4b4d-957f-ce64a1a2141e","Type":"ContainerDied","Data":"0818f90c3721605f750261e0fed03d2b24c9b02a1c64771eca10500986c23100"} Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.710053 4760 scope.go:117] "RemoveContainer" containerID="90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.733913 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8jn6b"] Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.734571 4760 scope.go:117] "RemoveContainer" containerID="6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.742523 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8jn6b"] Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.758076 4760 scope.go:117] "RemoveContainer" containerID="fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.801876 4760 scope.go:117] "RemoveContainer" containerID="90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba" Nov 24 17:51:43 crc kubenswrapper[4760]: E1124 17:51:43.802634 4760 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba\": container with ID starting with 90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba not found: ID does not exist" containerID="90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.802680 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba"} err="failed to get container status \"90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba\": rpc error: code = NotFound desc = could not find container \"90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba\": container with ID starting with 90a173ebe4c6632fa8a05fb44b0931a3b0c27e71c8ed5282fa009d4dd3fd8aba not found: ID does not exist" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.802708 4760 scope.go:117] "RemoveContainer" containerID="6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b" Nov 24 17:51:43 crc kubenswrapper[4760]: E1124 17:51:43.803055 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b\": container with ID starting with 6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b not found: ID does not exist" containerID="6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.803079 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b"} err="failed to get container status \"6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b\": rpc error: code = NotFound desc = could not find container \"6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b\": container with ID starting with 6f8d5e0bac7064e3e6ad6a6b0f31b17de5a6f9825272dbabaa3ad6e4abcfee7b not found: ID does not exist" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.803095 4760 scope.go:117] "RemoveContainer" containerID="fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f" Nov 24 17:51:43 crc kubenswrapper[4760]: E1124 17:51:43.803624 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f\": container with ID starting with fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f not found: ID does not exist" containerID="fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f" Nov 24 17:51:43 crc kubenswrapper[4760]: I1124 17:51:43.803721 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f"} err="failed to get container status \"fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f\": rpc error: code = NotFound desc = could not find container \"fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f\": container with ID starting with fcf16c4a30f43daa5cee0bf9b7ec8cc791b043012e15d3c0f8f25b1a2922919f not found: ID does not exist" Nov 24 17:51:45 crc kubenswrapper[4760]: I1124 17:51:45.480471 4760 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="8475486a-4961-4b4d-957f-ce64a1a2141e" path="/var/lib/kubelet/pods/8475486a-4961-4b4d-957f-ce64a1a2141e/volumes" Nov 24 17:52:05 crc kubenswrapper[4760]: I1124 17:52:05.642379 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:52:05 crc kubenswrapper[4760]: I1124 17:52:05.642941 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:52:08 crc kubenswrapper[4760]: I1124 17:52:08.790776 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dbc4p"] Nov 24 17:52:08 crc kubenswrapper[4760]: E1124 17:52:08.791486 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerName="registry-server" Nov 24 17:52:08 crc kubenswrapper[4760]: I1124 17:52:08.791500 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerName="registry-server" Nov 24 17:52:08 crc kubenswrapper[4760]: E1124 17:52:08.791511 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerName="extract-utilities" Nov 24 17:52:08 crc kubenswrapper[4760]: I1124 17:52:08.791517 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerName="extract-utilities" Nov 24 17:52:08 crc kubenswrapper[4760]: E1124 17:52:08.791527 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerName="extract-content" Nov 24 17:52:08 crc kubenswrapper[4760]: I1124 17:52:08.791534 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerName="extract-content" Nov 24 17:52:08 crc kubenswrapper[4760]: I1124 17:52:08.791731 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="8475486a-4961-4b4d-957f-ce64a1a2141e" containerName="registry-server" Nov 24 17:52:08 crc kubenswrapper[4760]: I1124 17:52:08.798861 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:08 crc kubenswrapper[4760]: I1124 17:52:08.805799 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbc4p"] Nov 24 17:52:08 crc kubenswrapper[4760]: I1124 17:52:08.926031 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-utilities\") pod \"redhat-marketplace-dbc4p\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:08 crc kubenswrapper[4760]: I1124 17:52:08.926141 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz8r5\" (UniqueName: \"kubernetes.io/projected/6dbdd04a-b09e-4354-af98-3a6134158b50-kube-api-access-gz8r5\") pod \"redhat-marketplace-dbc4p\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:08 crc kubenswrapper[4760]: I1124 17:52:08.926431 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-catalog-content\") pod \"redhat-marketplace-dbc4p\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.027953 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-catalog-content\") pod \"redhat-marketplace-dbc4p\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.028321 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz8r5\" (UniqueName: \"kubernetes.io/projected/6dbdd04a-b09e-4354-af98-3a6134158b50-kube-api-access-gz8r5\") pod \"redhat-marketplace-dbc4p\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.028450 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-utilities\") pod \"redhat-marketplace-dbc4p\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.028484 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-catalog-content\") pod \"redhat-marketplace-dbc4p\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.028750 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-utilities\") pod \"redhat-marketplace-dbc4p\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.050034 4760 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gz8r5\" (UniqueName: \"kubernetes.io/projected/6dbdd04a-b09e-4354-af98-3a6134158b50-kube-api-access-gz8r5\") pod \"redhat-marketplace-dbc4p\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.121396 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.595844 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbc4p"] Nov 24 17:52:09 crc kubenswrapper[4760]: W1124 17:52:09.599212 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6dbdd04a_b09e_4354_af98_3a6134158b50.slice/crio-47c13cee4e0b6169a71e42b3c4a1aa54cb6e6731ece5ad7a33fdd1ee981eacd0 WatchSource:0}: Error finding container 47c13cee4e0b6169a71e42b3c4a1aa54cb6e6731ece5ad7a33fdd1ee981eacd0: Status 404 returned error can't find the container with id 47c13cee4e0b6169a71e42b3c4a1aa54cb6e6731ece5ad7a33fdd1ee981eacd0 Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.945844 4760 generic.go:334] "Generic (PLEG): container finished" podID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerID="242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4" exitCode=0 Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.945957 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbc4p" event={"ID":"6dbdd04a-b09e-4354-af98-3a6134158b50","Type":"ContainerDied","Data":"242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4"} Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.946251 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbc4p" event={"ID":"6dbdd04a-b09e-4354-af98-3a6134158b50","Type":"ContainerStarted","Data":"47c13cee4e0b6169a71e42b3c4a1aa54cb6e6731ece5ad7a33fdd1ee981eacd0"} Nov 24 17:52:09 crc kubenswrapper[4760]: I1124 17:52:09.947716 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:52:10 crc kubenswrapper[4760]: I1124 17:52:10.956701 4760 generic.go:334] "Generic (PLEG): container finished" podID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerID="195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f" exitCode=0 Nov 24 17:52:10 crc kubenswrapper[4760]: I1124 17:52:10.956751 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbc4p" event={"ID":"6dbdd04a-b09e-4354-af98-3a6134158b50","Type":"ContainerDied","Data":"195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f"} Nov 24 17:52:11 crc kubenswrapper[4760]: I1124 17:52:11.967422 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbc4p" event={"ID":"6dbdd04a-b09e-4354-af98-3a6134158b50","Type":"ContainerStarted","Data":"7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7"} Nov 24 17:52:11 crc kubenswrapper[4760]: I1124 17:52:11.993097 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dbc4p" podStartSLOduration=2.559786816 podStartE2EDuration="3.993076993s" podCreationTimestamp="2025-11-24 17:52:08 +0000 UTC" firstStartedPulling="2025-11-24 17:52:09.947424313 +0000 UTC m=+2925.270305863" 
lastFinishedPulling="2025-11-24 17:52:11.38071449 +0000 UTC m=+2926.703596040" observedRunningTime="2025-11-24 17:52:11.98421842 +0000 UTC m=+2927.307099980" watchObservedRunningTime="2025-11-24 17:52:11.993076993 +0000 UTC m=+2927.315958543" Nov 24 17:52:19 crc kubenswrapper[4760]: I1124 17:52:19.122913 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:19 crc kubenswrapper[4760]: I1124 17:52:19.123734 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:19 crc kubenswrapper[4760]: I1124 17:52:19.173361 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:20 crc kubenswrapper[4760]: I1124 17:52:20.084277 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:20 crc kubenswrapper[4760]: I1124 17:52:20.138987 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbc4p"] Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.059087 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dbc4p" podUID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerName="registry-server" containerID="cri-o://7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7" gracePeriod=2 Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.608561 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.704716 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-utilities\") pod \"6dbdd04a-b09e-4354-af98-3a6134158b50\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.704875 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gz8r5\" (UniqueName: \"kubernetes.io/projected/6dbdd04a-b09e-4354-af98-3a6134158b50-kube-api-access-gz8r5\") pod \"6dbdd04a-b09e-4354-af98-3a6134158b50\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.705074 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-catalog-content\") pod \"6dbdd04a-b09e-4354-af98-3a6134158b50\" (UID: \"6dbdd04a-b09e-4354-af98-3a6134158b50\") " Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.705771 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-utilities" (OuterVolumeSpecName: "utilities") pod "6dbdd04a-b09e-4354-af98-3a6134158b50" (UID: "6dbdd04a-b09e-4354-af98-3a6134158b50"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.710029 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dbdd04a-b09e-4354-af98-3a6134158b50-kube-api-access-gz8r5" (OuterVolumeSpecName: "kube-api-access-gz8r5") pod "6dbdd04a-b09e-4354-af98-3a6134158b50" (UID: "6dbdd04a-b09e-4354-af98-3a6134158b50"). InnerVolumeSpecName "kube-api-access-gz8r5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.721419 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6dbdd04a-b09e-4354-af98-3a6134158b50" (UID: "6dbdd04a-b09e-4354-af98-3a6134158b50"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.807429 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.807467 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dbdd04a-b09e-4354-af98-3a6134158b50-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:22 crc kubenswrapper[4760]: I1124 17:52:22.807477 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gz8r5\" (UniqueName: \"kubernetes.io/projected/6dbdd04a-b09e-4354-af98-3a6134158b50-kube-api-access-gz8r5\") on node \"crc\" DevicePath \"\"" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.069905 4760 generic.go:334] "Generic (PLEG): container finished" podID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerID="7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7" exitCode=0 Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.069958 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbc4p" event={"ID":"6dbdd04a-b09e-4354-af98-3a6134158b50","Type":"ContainerDied","Data":"7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7"} Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.070320 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbc4p" event={"ID":"6dbdd04a-b09e-4354-af98-3a6134158b50","Type":"ContainerDied","Data":"47c13cee4e0b6169a71e42b3c4a1aa54cb6e6731ece5ad7a33fdd1ee981eacd0"} Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.070352 4760 scope.go:117] "RemoveContainer" containerID="7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.069986 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbc4p" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.106671 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbc4p"] Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.108273 4760 scope.go:117] "RemoveContainer" containerID="195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.115911 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbc4p"] Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.138354 4760 scope.go:117] "RemoveContainer" containerID="242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.184264 4760 scope.go:117] "RemoveContainer" containerID="7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7" Nov 24 17:52:23 crc kubenswrapper[4760]: E1124 17:52:23.184829 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7\": container with ID starting with 7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7 not found: ID does not exist" containerID="7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.184892 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7"} err="failed to get container status \"7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7\": rpc error: code = NotFound desc = could not find container \"7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7\": container with ID starting with 7047fec4f9685c454f81f35e3faf994044b784b85abc32496e708486066a30e7 not found: ID does not exist" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.184921 4760 scope.go:117] "RemoveContainer" containerID="195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f" Nov 24 17:52:23 crc kubenswrapper[4760]: E1124 17:52:23.185628 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f\": container with ID starting with 195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f not found: ID does not exist" containerID="195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.186331 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f"} err="failed to get container status \"195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f\": rpc error: code = NotFound desc = could not find container \"195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f\": container with ID starting with 195654bb343ca0f609438795cc60c8cc45c8eb42ca0b77ce49a719fe2453a20f not found: ID does not exist" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.186373 4760 scope.go:117] "RemoveContainer" containerID="242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4" Nov 24 17:52:23 crc kubenswrapper[4760]: E1124 17:52:23.186734 4760 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4\": container with ID starting with 242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4 not found: ID does not exist" containerID="242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.186771 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4"} err="failed to get container status \"242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4\": rpc error: code = NotFound desc = could not find container \"242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4\": container with ID starting with 242635fa68d88915aedc1039c71852465017b1c420cd8582a3cb7cf5c8fa08c4 not found: ID does not exist" Nov 24 17:52:23 crc kubenswrapper[4760]: I1124 17:52:23.479108 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6dbdd04a-b09e-4354-af98-3a6134158b50" path="/var/lib/kubelet/pods/6dbdd04a-b09e-4354-af98-3a6134158b50/volumes" Nov 24 17:52:35 crc kubenswrapper[4760]: I1124 17:52:35.642652 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:52:35 crc kubenswrapper[4760]: I1124 17:52:35.644190 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:53:05 crc kubenswrapper[4760]: I1124 17:53:05.642759 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 17:53:05 crc kubenswrapper[4760]: I1124 17:53:05.643305 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 17:53:05 crc kubenswrapper[4760]: I1124 17:53:05.643354 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 17:53:05 crc kubenswrapper[4760]: I1124 17:53:05.644631 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 17:53:05 crc kubenswrapper[4760]: I1124 17:53:05.644951 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" 
podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" gracePeriod=600 Nov 24 17:53:05 crc kubenswrapper[4760]: E1124 17:53:05.766331 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:53:06 crc kubenswrapper[4760]: I1124 17:53:06.466782 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" exitCode=0 Nov 24 17:53:06 crc kubenswrapper[4760]: I1124 17:53:06.466835 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f"} Nov 24 17:53:06 crc kubenswrapper[4760]: I1124 17:53:06.466866 4760 scope.go:117] "RemoveContainer" containerID="655d062a73e4e4c9d1e66c56f8dd43ba60f89b20b2da8c3a9f553aaf836cb8d7" Nov 24 17:53:06 crc kubenswrapper[4760]: I1124 17:53:06.468178 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:53:06 crc kubenswrapper[4760]: E1124 17:53:06.468612 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:53:19 crc kubenswrapper[4760]: I1124 17:53:19.466797 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:53:19 crc kubenswrapper[4760]: E1124 17:53:19.467573 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:53:33 crc kubenswrapper[4760]: I1124 17:53:33.466883 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:53:33 crc kubenswrapper[4760]: E1124 17:53:33.467675 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:53:46 crc kubenswrapper[4760]: I1124 17:53:46.466166 4760 scope.go:117] 
"RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:53:46 crc kubenswrapper[4760]: E1124 17:53:46.466898 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:54:01 crc kubenswrapper[4760]: I1124 17:54:01.467413 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:54:01 crc kubenswrapper[4760]: E1124 17:54:01.468188 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:54:13 crc kubenswrapper[4760]: I1124 17:54:13.467623 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:54:13 crc kubenswrapper[4760]: E1124 17:54:13.468442 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:54:27 crc kubenswrapper[4760]: I1124 17:54:27.466984 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:54:27 crc kubenswrapper[4760]: E1124 17:54:27.467918 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:54:42 crc kubenswrapper[4760]: I1124 17:54:42.467237 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:54:42 crc kubenswrapper[4760]: E1124 17:54:42.469607 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:54:57 crc kubenswrapper[4760]: I1124 17:54:57.466909 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:54:57 crc kubenswrapper[4760]: E1124 17:54:57.467678 4760 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:55:11 crc kubenswrapper[4760]: I1124 17:55:11.466694 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:55:11 crc kubenswrapper[4760]: E1124 17:55:11.467557 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:55:25 crc kubenswrapper[4760]: I1124 17:55:25.474720 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:55:25 crc kubenswrapper[4760]: E1124 17:55:25.475509 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:55:36 crc kubenswrapper[4760]: I1124 17:55:36.466586 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:55:36 crc kubenswrapper[4760]: E1124 17:55:36.467476 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:55:50 crc kubenswrapper[4760]: I1124 17:55:50.466915 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:55:50 crc kubenswrapper[4760]: E1124 17:55:50.467827 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:56:01 crc kubenswrapper[4760]: I1124 17:56:01.466879 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:56:01 crc kubenswrapper[4760]: E1124 17:56:01.467625 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:56:12 crc kubenswrapper[4760]: I1124 17:56:12.466814 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:56:12 crc kubenswrapper[4760]: E1124 17:56:12.468432 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:56:24 crc kubenswrapper[4760]: I1124 17:56:24.466811 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:56:24 crc kubenswrapper[4760]: E1124 17:56:24.467559 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:56:35 crc kubenswrapper[4760]: I1124 17:56:35.472480 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:56:35 crc kubenswrapper[4760]: E1124 17:56:35.473323 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:56:49 crc kubenswrapper[4760]: I1124 17:56:49.466652 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:56:49 crc kubenswrapper[4760]: E1124 17:56:49.468241 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:57:02 crc kubenswrapper[4760]: I1124 17:57:02.467044 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:57:02 crc kubenswrapper[4760]: E1124 17:57:02.467810 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" 
podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:57:16 crc kubenswrapper[4760]: I1124 17:57:16.466392 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:57:16 crc kubenswrapper[4760]: E1124 17:57:16.467168 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:57:31 crc kubenswrapper[4760]: I1124 17:57:31.466300 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:57:31 crc kubenswrapper[4760]: E1124 17:57:31.467138 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:57:45 crc kubenswrapper[4760]: I1124 17:57:45.475392 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:57:45 crc kubenswrapper[4760]: E1124 17:57:45.476369 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:57:57 crc kubenswrapper[4760]: I1124 17:57:57.467360 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:57:57 crc kubenswrapper[4760]: E1124 17:57:57.468437 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 17:58:11 crc kubenswrapper[4760]: I1124 17:58:11.466888 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 17:58:12 crc kubenswrapper[4760]: I1124 17:58:12.156883 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"068cd700a48609c485da1623d466fe38a9ca4d92ad19e40e3c85a4e6d8a2c01b"} Nov 24 17:58:13 crc kubenswrapper[4760]: I1124 17:58:13.165940 4760 generic.go:334] "Generic (PLEG): container finished" podID="eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" containerID="91bfb00a96f6b1aeb02d92fc57d0f8f342c5df5a22e7041d9b8bfa404a038fd0" exitCode=0 Nov 24 17:58:13 crc kubenswrapper[4760]: I1124 
17:58:13.166065 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01","Type":"ContainerDied","Data":"91bfb00a96f6b1aeb02d92fc57d0f8f342c5df5a22e7041d9b8bfa404a038fd0"} Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.526481 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.603116 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.603184 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rfx4\" (UniqueName: \"kubernetes.io/projected/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-kube-api-access-8rfx4\") pod \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.603247 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-temporary\") pod \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.603275 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-config-data\") pod \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.603378 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config\") pod \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.603454 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ssh-key\") pod \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.603725 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config-secret\") pod \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.603776 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-workdir\") pod \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.603856 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: 
\"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ca-certs\") pod \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\" (UID: \"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01\") " Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.604019 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" (UID: "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.604431 4760 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.604986 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-config-data" (OuterVolumeSpecName: "config-data") pod "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" (UID: "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.611448 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "test-operator-logs") pod "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" (UID: "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.611539 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-kube-api-access-8rfx4" (OuterVolumeSpecName: "kube-api-access-8rfx4") pod "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" (UID: "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01"). InnerVolumeSpecName "kube-api-access-8rfx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.617353 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" (UID: "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.632406 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" (UID: "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.633209 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" (UID: "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.640463 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" (UID: "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.665291 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" (UID: "eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.707965 4760 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.708042 4760 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.708060 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rfx4\" (UniqueName: \"kubernetes.io/projected/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-kube-api-access-8rfx4\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.708076 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.708089 4760 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.708100 4760 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.708111 4760 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.708125 4760 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 24 
17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.727399 4760 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 24 17:58:14 crc kubenswrapper[4760]: I1124 17:58:14.809596 4760 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 24 17:58:15 crc kubenswrapper[4760]: I1124 17:58:15.193334 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01","Type":"ContainerDied","Data":"dc3c73b1b1e791afd9e0143e6120fa7d0fec03917a9c81654242aea2e0c51e1d"} Nov 24 17:58:15 crc kubenswrapper[4760]: I1124 17:58:15.193836 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc3c73b1b1e791afd9e0143e6120fa7d0fec03917a9c81654242aea2e0c51e1d" Nov 24 17:58:15 crc kubenswrapper[4760]: I1124 17:58:15.193443 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.177798 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 24 17:58:24 crc kubenswrapper[4760]: E1124 17:58:24.178814 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerName="extract-utilities" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.178829 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerName="extract-utilities" Nov 24 17:58:24 crc kubenswrapper[4760]: E1124 17:58:24.178837 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerName="registry-server" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.178844 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerName="registry-server" Nov 24 17:58:24 crc kubenswrapper[4760]: E1124 17:58:24.178853 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" containerName="tempest-tests-tempest-tests-runner" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.178859 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" containerName="tempest-tests-tempest-tests-runner" Nov 24 17:58:24 crc kubenswrapper[4760]: E1124 17:58:24.178880 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerName="extract-content" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.178886 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerName="extract-content" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.179517 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01" containerName="tempest-tests-tempest-tests-runner" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.179535 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dbdd04a-b09e-4354-af98-3a6134158b50" containerName="registry-server" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.180343 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.186124 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-2v97f" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.189684 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.321154 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d89a1dea-6dac-4fd1-bb44-55076bd67fba\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.321225 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7qks\" (UniqueName: \"kubernetes.io/projected/d89a1dea-6dac-4fd1-bb44-55076bd67fba-kube-api-access-r7qks\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d89a1dea-6dac-4fd1-bb44-55076bd67fba\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.422804 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7qks\" (UniqueName: \"kubernetes.io/projected/d89a1dea-6dac-4fd1-bb44-55076bd67fba-kube-api-access-r7qks\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d89a1dea-6dac-4fd1-bb44-55076bd67fba\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.422984 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d89a1dea-6dac-4fd1-bb44-55076bd67fba\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.423499 4760 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d89a1dea-6dac-4fd1-bb44-55076bd67fba\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.446436 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7qks\" (UniqueName: \"kubernetes.io/projected/d89a1dea-6dac-4fd1-bb44-55076bd67fba-kube-api-access-r7qks\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d89a1dea-6dac-4fd1-bb44-55076bd67fba\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.448651 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d89a1dea-6dac-4fd1-bb44-55076bd67fba\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 24 17:58:24 crc 
kubenswrapper[4760]: I1124 17:58:24.500038 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.972537 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 24 17:58:24 crc kubenswrapper[4760]: I1124 17:58:24.975720 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 17:58:25 crc kubenswrapper[4760]: I1124 17:58:25.292658 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"d89a1dea-6dac-4fd1-bb44-55076bd67fba","Type":"ContainerStarted","Data":"b44985a48775fb869ea53880d8a159e009c0fff9c4ca26d71508c2a740e3caae"} Nov 24 17:58:26 crc kubenswrapper[4760]: I1124 17:58:26.303589 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"d89a1dea-6dac-4fd1-bb44-55076bd67fba","Type":"ContainerStarted","Data":"ac2f92420f8ae8d1dce374411ab86711518013068e9e7e063a8fa7e7a82f8437"} Nov 24 17:58:26 crc kubenswrapper[4760]: I1124 17:58:26.317474 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=1.339801816 podStartE2EDuration="2.317457204s" podCreationTimestamp="2025-11-24 17:58:24 +0000 UTC" firstStartedPulling="2025-11-24 17:58:24.975394262 +0000 UTC m=+3300.298275812" lastFinishedPulling="2025-11-24 17:58:25.95304965 +0000 UTC m=+3301.275931200" observedRunningTime="2025-11-24 17:58:26.315719704 +0000 UTC m=+3301.638601264" watchObservedRunningTime="2025-11-24 17:58:26.317457204 +0000 UTC m=+3301.640338764" Nov 24 17:58:48 crc kubenswrapper[4760]: I1124 17:58:48.982342 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-nz6g9/must-gather-7dx5g"] Nov 24 17:58:48 crc kubenswrapper[4760]: I1124 17:58:48.984499 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nz6g9/must-gather-7dx5g" Nov 24 17:58:48 crc kubenswrapper[4760]: I1124 17:58:48.990096 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-nz6g9"/"default-dockercfg-nbzcb" Nov 24 17:58:48 crc kubenswrapper[4760]: I1124 17:58:48.990120 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-nz6g9"/"kube-root-ca.crt" Nov 24 17:58:48 crc kubenswrapper[4760]: I1124 17:58:48.990136 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-nz6g9"/"openshift-service-ca.crt" Nov 24 17:58:49 crc kubenswrapper[4760]: I1124 17:58:49.002234 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-nz6g9/must-gather-7dx5g"] Nov 24 17:58:49 crc kubenswrapper[4760]: I1124 17:58:49.075631 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch8m8\" (UniqueName: \"kubernetes.io/projected/01a25ea0-debf-4dbb-89a7-75087fa4d098-kube-api-access-ch8m8\") pod \"must-gather-7dx5g\" (UID: \"01a25ea0-debf-4dbb-89a7-75087fa4d098\") " pod="openshift-must-gather-nz6g9/must-gather-7dx5g" Nov 24 17:58:49 crc kubenswrapper[4760]: I1124 17:58:49.076028 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/01a25ea0-debf-4dbb-89a7-75087fa4d098-must-gather-output\") pod \"must-gather-7dx5g\" (UID: \"01a25ea0-debf-4dbb-89a7-75087fa4d098\") " pod="openshift-must-gather-nz6g9/must-gather-7dx5g" Nov 24 17:58:49 crc kubenswrapper[4760]: I1124 17:58:49.178172 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/01a25ea0-debf-4dbb-89a7-75087fa4d098-must-gather-output\") pod \"must-gather-7dx5g\" (UID: \"01a25ea0-debf-4dbb-89a7-75087fa4d098\") " pod="openshift-must-gather-nz6g9/must-gather-7dx5g" Nov 24 17:58:49 crc kubenswrapper[4760]: I1124 17:58:49.178292 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch8m8\" (UniqueName: \"kubernetes.io/projected/01a25ea0-debf-4dbb-89a7-75087fa4d098-kube-api-access-ch8m8\") pod \"must-gather-7dx5g\" (UID: \"01a25ea0-debf-4dbb-89a7-75087fa4d098\") " pod="openshift-must-gather-nz6g9/must-gather-7dx5g" Nov 24 17:58:49 crc kubenswrapper[4760]: I1124 17:58:49.178963 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/01a25ea0-debf-4dbb-89a7-75087fa4d098-must-gather-output\") pod \"must-gather-7dx5g\" (UID: \"01a25ea0-debf-4dbb-89a7-75087fa4d098\") " pod="openshift-must-gather-nz6g9/must-gather-7dx5g" Nov 24 17:58:49 crc kubenswrapper[4760]: I1124 17:58:49.199040 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch8m8\" (UniqueName: \"kubernetes.io/projected/01a25ea0-debf-4dbb-89a7-75087fa4d098-kube-api-access-ch8m8\") pod \"must-gather-7dx5g\" (UID: \"01a25ea0-debf-4dbb-89a7-75087fa4d098\") " pod="openshift-must-gather-nz6g9/must-gather-7dx5g" Nov 24 17:58:49 crc kubenswrapper[4760]: I1124 17:58:49.301238 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nz6g9/must-gather-7dx5g" Nov 24 17:58:49 crc kubenswrapper[4760]: I1124 17:58:49.808363 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-nz6g9/must-gather-7dx5g"] Nov 24 17:58:50 crc kubenswrapper[4760]: I1124 17:58:50.512680 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/must-gather-7dx5g" event={"ID":"01a25ea0-debf-4dbb-89a7-75087fa4d098","Type":"ContainerStarted","Data":"2b7ffd24dbee20cb3593d420a07d7117130ad332879049a63e0e6691a572e775"} Nov 24 17:58:54 crc kubenswrapper[4760]: I1124 17:58:54.553554 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/must-gather-7dx5g" event={"ID":"01a25ea0-debf-4dbb-89a7-75087fa4d098","Type":"ContainerStarted","Data":"aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995"} Nov 24 17:58:54 crc kubenswrapper[4760]: I1124 17:58:54.554049 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/must-gather-7dx5g" event={"ID":"01a25ea0-debf-4dbb-89a7-75087fa4d098","Type":"ContainerStarted","Data":"c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe"} Nov 24 17:58:54 crc kubenswrapper[4760]: I1124 17:58:54.577557 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-nz6g9/must-gather-7dx5g" podStartSLOduration=2.8245821810000002 podStartE2EDuration="6.577534529s" podCreationTimestamp="2025-11-24 17:58:48 +0000 UTC" firstStartedPulling="2025-11-24 17:58:49.80724344 +0000 UTC m=+3325.130124990" lastFinishedPulling="2025-11-24 17:58:53.560195798 +0000 UTC m=+3328.883077338" observedRunningTime="2025-11-24 17:58:54.569063427 +0000 UTC m=+3329.891944977" watchObservedRunningTime="2025-11-24 17:58:54.577534529 +0000 UTC m=+3329.900416079" Nov 24 17:58:58 crc kubenswrapper[4760]: I1124 17:58:58.245943 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-nz6g9/crc-debug-9p722"] Nov 24 17:58:58 crc kubenswrapper[4760]: I1124 17:58:58.247590 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-9p722" Nov 24 17:58:58 crc kubenswrapper[4760]: I1124 17:58:58.356020 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1f6a4963-b650-4f92-8eaa-2bea22756cdb-host\") pod \"crc-debug-9p722\" (UID: \"1f6a4963-b650-4f92-8eaa-2bea22756cdb\") " pod="openshift-must-gather-nz6g9/crc-debug-9p722" Nov 24 17:58:58 crc kubenswrapper[4760]: I1124 17:58:58.356116 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88bc8\" (UniqueName: \"kubernetes.io/projected/1f6a4963-b650-4f92-8eaa-2bea22756cdb-kube-api-access-88bc8\") pod \"crc-debug-9p722\" (UID: \"1f6a4963-b650-4f92-8eaa-2bea22756cdb\") " pod="openshift-must-gather-nz6g9/crc-debug-9p722" Nov 24 17:58:58 crc kubenswrapper[4760]: I1124 17:58:58.457406 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1f6a4963-b650-4f92-8eaa-2bea22756cdb-host\") pod \"crc-debug-9p722\" (UID: \"1f6a4963-b650-4f92-8eaa-2bea22756cdb\") " pod="openshift-must-gather-nz6g9/crc-debug-9p722" Nov 24 17:58:58 crc kubenswrapper[4760]: I1124 17:58:58.457521 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88bc8\" (UniqueName: \"kubernetes.io/projected/1f6a4963-b650-4f92-8eaa-2bea22756cdb-kube-api-access-88bc8\") pod \"crc-debug-9p722\" (UID: \"1f6a4963-b650-4f92-8eaa-2bea22756cdb\") " pod="openshift-must-gather-nz6g9/crc-debug-9p722" Nov 24 17:58:58 crc kubenswrapper[4760]: I1124 17:58:58.457544 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1f6a4963-b650-4f92-8eaa-2bea22756cdb-host\") pod \"crc-debug-9p722\" (UID: \"1f6a4963-b650-4f92-8eaa-2bea22756cdb\") " pod="openshift-must-gather-nz6g9/crc-debug-9p722" Nov 24 17:58:58 crc kubenswrapper[4760]: I1124 17:58:58.483132 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88bc8\" (UniqueName: \"kubernetes.io/projected/1f6a4963-b650-4f92-8eaa-2bea22756cdb-kube-api-access-88bc8\") pod \"crc-debug-9p722\" (UID: \"1f6a4963-b650-4f92-8eaa-2bea22756cdb\") " pod="openshift-must-gather-nz6g9/crc-debug-9p722" Nov 24 17:58:58 crc kubenswrapper[4760]: I1124 17:58:58.570707 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-9p722" Nov 24 17:58:58 crc kubenswrapper[4760]: W1124 17:58:58.602064 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f6a4963_b650_4f92_8eaa_2bea22756cdb.slice/crio-d4548d7d613cc0c884c313dac6e76e61cdc4dde84e6bc5e2863c4c808b44b888 WatchSource:0}: Error finding container d4548d7d613cc0c884c313dac6e76e61cdc4dde84e6bc5e2863c4c808b44b888: Status 404 returned error can't find the container with id d4548d7d613cc0c884c313dac6e76e61cdc4dde84e6bc5e2863c4c808b44b888 Nov 24 17:58:59 crc kubenswrapper[4760]: I1124 17:58:59.597537 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/crc-debug-9p722" event={"ID":"1f6a4963-b650-4f92-8eaa-2bea22756cdb","Type":"ContainerStarted","Data":"d4548d7d613cc0c884c313dac6e76e61cdc4dde84e6bc5e2863c4c808b44b888"} Nov 24 17:59:09 crc kubenswrapper[4760]: I1124 17:59:09.709323 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/crc-debug-9p722" event={"ID":"1f6a4963-b650-4f92-8eaa-2bea22756cdb","Type":"ContainerStarted","Data":"8d9a9ee11b1767c9623d7c4568db65ba4dc3094c6db5b203ed6908881eb29d03"} Nov 24 17:59:09 crc kubenswrapper[4760]: I1124 17:59:09.737982 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-nz6g9/crc-debug-9p722" podStartSLOduration=1.292553671 podStartE2EDuration="11.737962819s" podCreationTimestamp="2025-11-24 17:58:58 +0000 UTC" firstStartedPulling="2025-11-24 17:58:58.604525479 +0000 UTC m=+3333.927407029" lastFinishedPulling="2025-11-24 17:59:09.049934627 +0000 UTC m=+3344.372816177" observedRunningTime="2025-11-24 17:59:09.728876759 +0000 UTC m=+3345.051758309" watchObservedRunningTime="2025-11-24 17:59:09.737962819 +0000 UTC m=+3345.060844369" Nov 24 17:59:17 crc kubenswrapper[4760]: I1124 17:59:17.311979 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-69f4488969-xwpx8" podUID="37d3f873-9ed8-47d6-b62d-3b007dca3936" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 24 17:59:53 crc kubenswrapper[4760]: I1124 17:59:53.067328 4760 generic.go:334] "Generic (PLEG): container finished" podID="1f6a4963-b650-4f92-8eaa-2bea22756cdb" containerID="8d9a9ee11b1767c9623d7c4568db65ba4dc3094c6db5b203ed6908881eb29d03" exitCode=0 Nov 24 17:59:53 crc kubenswrapper[4760]: I1124 17:59:53.067401 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/crc-debug-9p722" event={"ID":"1f6a4963-b650-4f92-8eaa-2bea22756cdb","Type":"ContainerDied","Data":"8d9a9ee11b1767c9623d7c4568db65ba4dc3094c6db5b203ed6908881eb29d03"} Nov 24 17:59:54 crc kubenswrapper[4760]: I1124 17:59:54.197125 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-9p722" Nov 24 17:59:54 crc kubenswrapper[4760]: I1124 17:59:54.228698 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-nz6g9/crc-debug-9p722"] Nov 24 17:59:54 crc kubenswrapper[4760]: I1124 17:59:54.237103 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-nz6g9/crc-debug-9p722"] Nov 24 17:59:54 crc kubenswrapper[4760]: I1124 17:59:54.285102 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1f6a4963-b650-4f92-8eaa-2bea22756cdb-host\") pod \"1f6a4963-b650-4f92-8eaa-2bea22756cdb\" (UID: \"1f6a4963-b650-4f92-8eaa-2bea22756cdb\") " Nov 24 17:59:54 crc kubenswrapper[4760]: I1124 17:59:54.285178 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88bc8\" (UniqueName: \"kubernetes.io/projected/1f6a4963-b650-4f92-8eaa-2bea22756cdb-kube-api-access-88bc8\") pod \"1f6a4963-b650-4f92-8eaa-2bea22756cdb\" (UID: \"1f6a4963-b650-4f92-8eaa-2bea22756cdb\") " Nov 24 17:59:54 crc kubenswrapper[4760]: I1124 17:59:54.285226 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1f6a4963-b650-4f92-8eaa-2bea22756cdb-host" (OuterVolumeSpecName: "host") pod "1f6a4963-b650-4f92-8eaa-2bea22756cdb" (UID: "1f6a4963-b650-4f92-8eaa-2bea22756cdb"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:59:54 crc kubenswrapper[4760]: I1124 17:59:54.285784 4760 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1f6a4963-b650-4f92-8eaa-2bea22756cdb-host\") on node \"crc\" DevicePath \"\"" Nov 24 17:59:54 crc kubenswrapper[4760]: I1124 17:59:54.292254 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f6a4963-b650-4f92-8eaa-2bea22756cdb-kube-api-access-88bc8" (OuterVolumeSpecName: "kube-api-access-88bc8") pod "1f6a4963-b650-4f92-8eaa-2bea22756cdb" (UID: "1f6a4963-b650-4f92-8eaa-2bea22756cdb"). InnerVolumeSpecName "kube-api-access-88bc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:59:54 crc kubenswrapper[4760]: I1124 17:59:54.386979 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88bc8\" (UniqueName: \"kubernetes.io/projected/1f6a4963-b650-4f92-8eaa-2bea22756cdb-kube-api-access-88bc8\") on node \"crc\" DevicePath \"\"" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.095623 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4548d7d613cc0c884c313dac6e76e61cdc4dde84e6bc5e2863c4c808b44b888" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.095713 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-9p722" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.450834 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-nz6g9/crc-debug-vgn7p"] Nov 24 17:59:55 crc kubenswrapper[4760]: E1124 17:59:55.451664 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f6a4963-b650-4f92-8eaa-2bea22756cdb" containerName="container-00" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.451682 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f6a4963-b650-4f92-8eaa-2bea22756cdb" containerName="container-00" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.451968 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f6a4963-b650-4f92-8eaa-2bea22756cdb" containerName="container-00" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.452805 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.480331 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f6a4963-b650-4f92-8eaa-2bea22756cdb" path="/var/lib/kubelet/pods/1f6a4963-b650-4f92-8eaa-2bea22756cdb/volumes" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.610335 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f201682f-db00-4463-89f9-b011db7b1e07-host\") pod \"crc-debug-vgn7p\" (UID: \"f201682f-db00-4463-89f9-b011db7b1e07\") " pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.610651 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck8g5\" (UniqueName: \"kubernetes.io/projected/f201682f-db00-4463-89f9-b011db7b1e07-kube-api-access-ck8g5\") pod \"crc-debug-vgn7p\" (UID: \"f201682f-db00-4463-89f9-b011db7b1e07\") " pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.713061 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f201682f-db00-4463-89f9-b011db7b1e07-host\") pod \"crc-debug-vgn7p\" (UID: \"f201682f-db00-4463-89f9-b011db7b1e07\") " pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.713235 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f201682f-db00-4463-89f9-b011db7b1e07-host\") pod \"crc-debug-vgn7p\" (UID: \"f201682f-db00-4463-89f9-b011db7b1e07\") " pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.713376 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ck8g5\" (UniqueName: \"kubernetes.io/projected/f201682f-db00-4463-89f9-b011db7b1e07-kube-api-access-ck8g5\") pod \"crc-debug-vgn7p\" (UID: \"f201682f-db00-4463-89f9-b011db7b1e07\") " pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.740274 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ck8g5\" (UniqueName: \"kubernetes.io/projected/f201682f-db00-4463-89f9-b011db7b1e07-kube-api-access-ck8g5\") pod \"crc-debug-vgn7p\" (UID: \"f201682f-db00-4463-89f9-b011db7b1e07\") " 
pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" Nov 24 17:59:55 crc kubenswrapper[4760]: I1124 17:59:55.777129 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" Nov 24 17:59:56 crc kubenswrapper[4760]: I1124 17:59:56.103856 4760 generic.go:334] "Generic (PLEG): container finished" podID="f201682f-db00-4463-89f9-b011db7b1e07" containerID="f1a981c93d1c5af480642a59cb6c6816d83057f85f1956a7130daec7cf5bb95e" exitCode=0 Nov 24 17:59:56 crc kubenswrapper[4760]: I1124 17:59:56.103943 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" event={"ID":"f201682f-db00-4463-89f9-b011db7b1e07","Type":"ContainerDied","Data":"f1a981c93d1c5af480642a59cb6c6816d83057f85f1956a7130daec7cf5bb95e"} Nov 24 17:59:56 crc kubenswrapper[4760]: I1124 17:59:56.104634 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" event={"ID":"f201682f-db00-4463-89f9-b011db7b1e07","Type":"ContainerStarted","Data":"d0d3787917269e09e9fbb722fc679f718c0b401a16b2c088826d231841ff69c3"} Nov 24 17:59:56 crc kubenswrapper[4760]: I1124 17:59:56.569025 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-nz6g9/crc-debug-vgn7p"] Nov 24 17:59:56 crc kubenswrapper[4760]: I1124 17:59:56.577721 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-nz6g9/crc-debug-vgn7p"] Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.217164 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.341531 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ck8g5\" (UniqueName: \"kubernetes.io/projected/f201682f-db00-4463-89f9-b011db7b1e07-kube-api-access-ck8g5\") pod \"f201682f-db00-4463-89f9-b011db7b1e07\" (UID: \"f201682f-db00-4463-89f9-b011db7b1e07\") " Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.341604 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f201682f-db00-4463-89f9-b011db7b1e07-host\") pod \"f201682f-db00-4463-89f9-b011db7b1e07\" (UID: \"f201682f-db00-4463-89f9-b011db7b1e07\") " Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.342572 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f201682f-db00-4463-89f9-b011db7b1e07-host" (OuterVolumeSpecName: "host") pod "f201682f-db00-4463-89f9-b011db7b1e07" (UID: "f201682f-db00-4463-89f9-b011db7b1e07"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.343159 4760 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f201682f-db00-4463-89f9-b011db7b1e07-host\") on node \"crc\" DevicePath \"\"" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.360483 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f201682f-db00-4463-89f9-b011db7b1e07-kube-api-access-ck8g5" (OuterVolumeSpecName: "kube-api-access-ck8g5") pod "f201682f-db00-4463-89f9-b011db7b1e07" (UID: "f201682f-db00-4463-89f9-b011db7b1e07"). InnerVolumeSpecName "kube-api-access-ck8g5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.444666 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ck8g5\" (UniqueName: \"kubernetes.io/projected/f201682f-db00-4463-89f9-b011db7b1e07-kube-api-access-ck8g5\") on node \"crc\" DevicePath \"\"" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.480091 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f201682f-db00-4463-89f9-b011db7b1e07" path="/var/lib/kubelet/pods/f201682f-db00-4463-89f9-b011db7b1e07/volumes" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.760160 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-nz6g9/crc-debug-ldpcq"] Nov 24 17:59:57 crc kubenswrapper[4760]: E1124 17:59:57.760878 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f201682f-db00-4463-89f9-b011db7b1e07" containerName="container-00" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.760894 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="f201682f-db00-4463-89f9-b011db7b1e07" containerName="container-00" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.761135 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="f201682f-db00-4463-89f9-b011db7b1e07" containerName="container-00" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.761770 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.852234 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4pfs\" (UniqueName: \"kubernetes.io/projected/c7e231cd-4407-430f-80b9-0c0d84757400-kube-api-access-b4pfs\") pod \"crc-debug-ldpcq\" (UID: \"c7e231cd-4407-430f-80b9-0c0d84757400\") " pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.852515 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7e231cd-4407-430f-80b9-0c0d84757400-host\") pod \"crc-debug-ldpcq\" (UID: \"c7e231cd-4407-430f-80b9-0c0d84757400\") " pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.955087 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4pfs\" (UniqueName: \"kubernetes.io/projected/c7e231cd-4407-430f-80b9-0c0d84757400-kube-api-access-b4pfs\") pod \"crc-debug-ldpcq\" (UID: \"c7e231cd-4407-430f-80b9-0c0d84757400\") " pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.955243 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7e231cd-4407-430f-80b9-0c0d84757400-host\") pod \"crc-debug-ldpcq\" (UID: \"c7e231cd-4407-430f-80b9-0c0d84757400\") " pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.955407 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7e231cd-4407-430f-80b9-0c0d84757400-host\") pod \"crc-debug-ldpcq\" (UID: \"c7e231cd-4407-430f-80b9-0c0d84757400\") " pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" Nov 24 17:59:57 crc kubenswrapper[4760]: I1124 17:59:57.980774 4760 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-b4pfs\" (UniqueName: \"kubernetes.io/projected/c7e231cd-4407-430f-80b9-0c0d84757400-kube-api-access-b4pfs\") pod \"crc-debug-ldpcq\" (UID: \"c7e231cd-4407-430f-80b9-0c0d84757400\") " pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" Nov 24 17:59:58 crc kubenswrapper[4760]: I1124 17:59:58.090472 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" Nov 24 17:59:58 crc kubenswrapper[4760]: I1124 17:59:58.129955 4760 scope.go:117] "RemoveContainer" containerID="f1a981c93d1c5af480642a59cb6c6816d83057f85f1956a7130daec7cf5bb95e" Nov 24 17:59:58 crc kubenswrapper[4760]: I1124 17:59:58.129973 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-vgn7p" Nov 24 17:59:59 crc kubenswrapper[4760]: I1124 17:59:59.141925 4760 generic.go:334] "Generic (PLEG): container finished" podID="c7e231cd-4407-430f-80b9-0c0d84757400" containerID="6349e97eb7f2f7556e163a13eee5d0e25acd9ae21b7ca6482bad04eeec3b03a5" exitCode=0 Nov 24 17:59:59 crc kubenswrapper[4760]: I1124 17:59:59.142025 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" event={"ID":"c7e231cd-4407-430f-80b9-0c0d84757400","Type":"ContainerDied","Data":"6349e97eb7f2f7556e163a13eee5d0e25acd9ae21b7ca6482bad04eeec3b03a5"} Nov 24 17:59:59 crc kubenswrapper[4760]: I1124 17:59:59.142457 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" event={"ID":"c7e231cd-4407-430f-80b9-0c0d84757400","Type":"ContainerStarted","Data":"b2da206410efe369a8da1e5682af8126fc6cac6e09186e521c5a4eb115d25a20"} Nov 24 17:59:59 crc kubenswrapper[4760]: I1124 17:59:59.185682 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-nz6g9/crc-debug-ldpcq"] Nov 24 17:59:59 crc kubenswrapper[4760]: I1124 17:59:59.195874 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-nz6g9/crc-debug-ldpcq"] Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.197431 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh"] Nov 24 18:00:00 crc kubenswrapper[4760]: E1124 18:00:00.198904 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7e231cd-4407-430f-80b9-0c0d84757400" containerName="container-00" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.199027 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7e231cd-4407-430f-80b9-0c0d84757400" containerName="container-00" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.199387 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7e231cd-4407-430f-80b9-0c0d84757400" containerName="container-00" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.200270 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.201991 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.202420 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.206116 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh"] Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.286119 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.301644 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f393ce56-8fdb-4607-b5d3-d0e543a118c9-config-volume\") pod \"collect-profiles-29400120-d5jdh\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.301779 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f393ce56-8fdb-4607-b5d3-d0e543a118c9-secret-volume\") pod \"collect-profiles-29400120-d5jdh\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.301856 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfw8t\" (UniqueName: \"kubernetes.io/projected/f393ce56-8fdb-4607-b5d3-d0e543a118c9-kube-api-access-vfw8t\") pod \"collect-profiles-29400120-d5jdh\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.403273 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4pfs\" (UniqueName: \"kubernetes.io/projected/c7e231cd-4407-430f-80b9-0c0d84757400-kube-api-access-b4pfs\") pod \"c7e231cd-4407-430f-80b9-0c0d84757400\" (UID: \"c7e231cd-4407-430f-80b9-0c0d84757400\") " Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.403425 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7e231cd-4407-430f-80b9-0c0d84757400-host\") pod \"c7e231cd-4407-430f-80b9-0c0d84757400\" (UID: \"c7e231cd-4407-430f-80b9-0c0d84757400\") " Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.403732 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfw8t\" (UniqueName: \"kubernetes.io/projected/f393ce56-8fdb-4607-b5d3-d0e543a118c9-kube-api-access-vfw8t\") pod \"collect-profiles-29400120-d5jdh\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.403730 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/host-path/c7e231cd-4407-430f-80b9-0c0d84757400-host" (OuterVolumeSpecName: "host") pod "c7e231cd-4407-430f-80b9-0c0d84757400" (UID: "c7e231cd-4407-430f-80b9-0c0d84757400"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.405043 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f393ce56-8fdb-4607-b5d3-d0e543a118c9-config-volume\") pod \"collect-profiles-29400120-d5jdh\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.404248 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f393ce56-8fdb-4607-b5d3-d0e543a118c9-config-volume\") pod \"collect-profiles-29400120-d5jdh\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.405192 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f393ce56-8fdb-4607-b5d3-d0e543a118c9-secret-volume\") pod \"collect-profiles-29400120-d5jdh\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.405584 4760 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7e231cd-4407-430f-80b9-0c0d84757400-host\") on node \"crc\" DevicePath \"\"" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.412209 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7e231cd-4407-430f-80b9-0c0d84757400-kube-api-access-b4pfs" (OuterVolumeSpecName: "kube-api-access-b4pfs") pod "c7e231cd-4407-430f-80b9-0c0d84757400" (UID: "c7e231cd-4407-430f-80b9-0c0d84757400"). InnerVolumeSpecName "kube-api-access-b4pfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.422260 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f393ce56-8fdb-4607-b5d3-d0e543a118c9-secret-volume\") pod \"collect-profiles-29400120-d5jdh\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.422735 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfw8t\" (UniqueName: \"kubernetes.io/projected/f393ce56-8fdb-4607-b5d3-d0e543a118c9-kube-api-access-vfw8t\") pod \"collect-profiles-29400120-d5jdh\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.507419 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4pfs\" (UniqueName: \"kubernetes.io/projected/c7e231cd-4407-430f-80b9-0c0d84757400-kube-api-access-b4pfs\") on node \"crc\" DevicePath \"\"" Nov 24 18:00:00 crc kubenswrapper[4760]: I1124 18:00:00.600118 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:01 crc kubenswrapper[4760]: W1124 18:00:01.052746 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf393ce56_8fdb_4607_b5d3_d0e543a118c9.slice/crio-f819307aebf9bf86f63094ff5b1d440bdbbbdda9f0866c2b2cdb1caf5564abb7 WatchSource:0}: Error finding container f819307aebf9bf86f63094ff5b1d440bdbbbdda9f0866c2b2cdb1caf5564abb7: Status 404 returned error can't find the container with id f819307aebf9bf86f63094ff5b1d440bdbbbdda9f0866c2b2cdb1caf5564abb7 Nov 24 18:00:01 crc kubenswrapper[4760]: I1124 18:00:01.053387 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh"] Nov 24 18:00:01 crc kubenswrapper[4760]: I1124 18:00:01.163314 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" event={"ID":"f393ce56-8fdb-4607-b5d3-d0e543a118c9","Type":"ContainerStarted","Data":"f819307aebf9bf86f63094ff5b1d440bdbbbdda9f0866c2b2cdb1caf5564abb7"} Nov 24 18:00:01 crc kubenswrapper[4760]: I1124 18:00:01.166107 4760 scope.go:117] "RemoveContainer" containerID="6349e97eb7f2f7556e163a13eee5d0e25acd9ae21b7ca6482bad04eeec3b03a5" Nov 24 18:00:01 crc kubenswrapper[4760]: I1124 18:00:01.166183 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nz6g9/crc-debug-ldpcq" Nov 24 18:00:01 crc kubenswrapper[4760]: I1124 18:00:01.478140 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7e231cd-4407-430f-80b9-0c0d84757400" path="/var/lib/kubelet/pods/c7e231cd-4407-430f-80b9-0c0d84757400/volumes" Nov 24 18:00:02 crc kubenswrapper[4760]: I1124 18:00:02.177051 4760 generic.go:334] "Generic (PLEG): container finished" podID="f393ce56-8fdb-4607-b5d3-d0e543a118c9" containerID="ddbf838059594d3c558d8806ca92d10323c6765028e1521d6d3aeb09d71f8864" exitCode=0 Nov 24 18:00:02 crc kubenswrapper[4760]: I1124 18:00:02.177102 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" event={"ID":"f393ce56-8fdb-4607-b5d3-d0e543a118c9","Type":"ContainerDied","Data":"ddbf838059594d3c558d8806ca92d10323c6765028e1521d6d3aeb09d71f8864"} Nov 24 18:00:03 crc kubenswrapper[4760]: I1124 18:00:03.533436 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:03 crc kubenswrapper[4760]: I1124 18:00:03.664395 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f393ce56-8fdb-4607-b5d3-d0e543a118c9-secret-volume\") pod \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " Nov 24 18:00:03 crc kubenswrapper[4760]: I1124 18:00:03.664462 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f393ce56-8fdb-4607-b5d3-d0e543a118c9-config-volume\") pod \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " Nov 24 18:00:03 crc kubenswrapper[4760]: I1124 18:00:03.664662 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfw8t\" (UniqueName: \"kubernetes.io/projected/f393ce56-8fdb-4607-b5d3-d0e543a118c9-kube-api-access-vfw8t\") pod \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\" (UID: \"f393ce56-8fdb-4607-b5d3-d0e543a118c9\") " Nov 24 18:00:03 crc kubenswrapper[4760]: I1124 18:00:03.665867 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f393ce56-8fdb-4607-b5d3-d0e543a118c9-config-volume" (OuterVolumeSpecName: "config-volume") pod "f393ce56-8fdb-4607-b5d3-d0e543a118c9" (UID: "f393ce56-8fdb-4607-b5d3-d0e543a118c9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 18:00:03 crc kubenswrapper[4760]: I1124 18:00:03.672597 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f393ce56-8fdb-4607-b5d3-d0e543a118c9-kube-api-access-vfw8t" (OuterVolumeSpecName: "kube-api-access-vfw8t") pod "f393ce56-8fdb-4607-b5d3-d0e543a118c9" (UID: "f393ce56-8fdb-4607-b5d3-d0e543a118c9"). InnerVolumeSpecName "kube-api-access-vfw8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:00:03 crc kubenswrapper[4760]: I1124 18:00:03.675213 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f393ce56-8fdb-4607-b5d3-d0e543a118c9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f393ce56-8fdb-4607-b5d3-d0e543a118c9" (UID: "f393ce56-8fdb-4607-b5d3-d0e543a118c9"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:00:03 crc kubenswrapper[4760]: I1124 18:00:03.767032 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfw8t\" (UniqueName: \"kubernetes.io/projected/f393ce56-8fdb-4607-b5d3-d0e543a118c9-kube-api-access-vfw8t\") on node \"crc\" DevicePath \"\"" Nov 24 18:00:03 crc kubenswrapper[4760]: I1124 18:00:03.767067 4760 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f393ce56-8fdb-4607-b5d3-d0e543a118c9-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 18:00:03 crc kubenswrapper[4760]: I1124 18:00:03.767078 4760 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f393ce56-8fdb-4607-b5d3-d0e543a118c9-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 18:00:04 crc kubenswrapper[4760]: I1124 18:00:04.196860 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" event={"ID":"f393ce56-8fdb-4607-b5d3-d0e543a118c9","Type":"ContainerDied","Data":"f819307aebf9bf86f63094ff5b1d440bdbbbdda9f0866c2b2cdb1caf5564abb7"} Nov 24 18:00:04 crc kubenswrapper[4760]: I1124 18:00:04.197238 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f819307aebf9bf86f63094ff5b1d440bdbbbdda9f0866c2b2cdb1caf5564abb7" Nov 24 18:00:04 crc kubenswrapper[4760]: I1124 18:00:04.196906 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400120-d5jdh" Nov 24 18:00:04 crc kubenswrapper[4760]: I1124 18:00:04.610442 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j"] Nov 24 18:00:04 crc kubenswrapper[4760]: I1124 18:00:04.618315 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400075-jdl8j"] Nov 24 18:00:05 crc kubenswrapper[4760]: I1124 18:00:05.497301 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c" path="/var/lib/kubelet/pods/53aed3e3-383a-4ca7-8c46-ba7a9b2fda0c/volumes" Nov 24 18:00:14 crc kubenswrapper[4760]: I1124 18:00:14.538888 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6b95dd9bc6-5gb75_0ad3d643-7c73-4b15-966d-d4c7cb1d2438/barbican-api/0.log" Nov 24 18:00:14 crc kubenswrapper[4760]: I1124 18:00:14.718959 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6b95dd9bc6-5gb75_0ad3d643-7c73-4b15-966d-d4c7cb1d2438/barbican-api-log/0.log" Nov 24 18:00:14 crc kubenswrapper[4760]: I1124 18:00:14.741491 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6b6b7c6d54-mkcp9_bd4e39bc-5c35-4906-906a-a5558f2861de/barbican-keystone-listener/0.log" Nov 24 18:00:14 crc kubenswrapper[4760]: I1124 18:00:14.827741 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6b6b7c6d54-mkcp9_bd4e39bc-5c35-4906-906a-a5558f2861de/barbican-keystone-listener-log/0.log" Nov 24 18:00:14 crc kubenswrapper[4760]: I1124 18:00:14.964026 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bc9c99c9f-fs95m_4ce07bef-e13b-45e6-ad5e-b7372c3b1432/barbican-worker/0.log" Nov 24 18:00:14 crc kubenswrapper[4760]: I1124 18:00:14.996575 4760 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bc9c99c9f-fs95m_4ce07bef-e13b-45e6-ad5e-b7372c3b1432/barbican-worker-log/0.log" Nov 24 18:00:15 crc kubenswrapper[4760]: I1124 18:00:15.133104 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm_b163f1e6-048b-4722-bb36-4cd23619b927/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:15 crc kubenswrapper[4760]: I1124 18:00:15.207415 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0aa23f2e-1d46-4435-abc2-e019f2070509/ceilometer-central-agent/0.log" Nov 24 18:00:15 crc kubenswrapper[4760]: I1124 18:00:15.365685 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0aa23f2e-1d46-4435-abc2-e019f2070509/ceilometer-notification-agent/0.log" Nov 24 18:00:15 crc kubenswrapper[4760]: I1124 18:00:15.492383 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0aa23f2e-1d46-4435-abc2-e019f2070509/proxy-httpd/0.log" Nov 24 18:00:15 crc kubenswrapper[4760]: I1124 18:00:15.581111 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0aa23f2e-1d46-4435-abc2-e019f2070509/sg-core/0.log" Nov 24 18:00:15 crc kubenswrapper[4760]: I1124 18:00:15.620097 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_fd87b6ec-9f3d-41ae-9647-6410620a1f4a/cinder-api/0.log" Nov 24 18:00:15 crc kubenswrapper[4760]: I1124 18:00:15.696123 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_fd87b6ec-9f3d-41ae-9647-6410620a1f4a/cinder-api-log/0.log" Nov 24 18:00:15 crc kubenswrapper[4760]: I1124 18:00:15.831452 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4faf4c98-3f75-4b32-b35d-99e020a71f8c/cinder-scheduler/0.log" Nov 24 18:00:15 crc kubenswrapper[4760]: I1124 18:00:15.868919 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4faf4c98-3f75-4b32-b35d-99e020a71f8c/probe/0.log" Nov 24 18:00:16 crc kubenswrapper[4760]: I1124 18:00:16.034186 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw_60a5bb95-2a7f-43be-a54f-be0872e8331b/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:16 crc kubenswrapper[4760]: I1124 18:00:16.079969 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb_37d99b2c-138d-4470-9807-eec5191203a6/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:16 crc kubenswrapper[4760]: I1124 18:00:16.226658 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-55478c4467-9frwn_bb018af8-7779-4386-8903-a1dfb982a26e/init/0.log" Nov 24 18:00:16 crc kubenswrapper[4760]: I1124 18:00:16.439497 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-55478c4467-9frwn_bb018af8-7779-4386-8903-a1dfb982a26e/init/0.log" Nov 24 18:00:16 crc kubenswrapper[4760]: I1124 18:00:16.488462 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr_8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:16 crc kubenswrapper[4760]: I1124 18:00:16.492381 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_dnsmasq-dns-55478c4467-9frwn_bb018af8-7779-4386-8903-a1dfb982a26e/dnsmasq-dns/0.log" Nov 24 18:00:16 crc kubenswrapper[4760]: I1124 18:00:16.677936 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_8531a189-02f3-4e03-8fca-ff113990ee3e/glance-httpd/0.log" Nov 24 18:00:16 crc kubenswrapper[4760]: I1124 18:00:16.713709 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_8531a189-02f3-4e03-8fca-ff113990ee3e/glance-log/0.log" Nov 24 18:00:16 crc kubenswrapper[4760]: I1124 18:00:16.858741 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_74e069c9-8459-4455-b520-fa8ba79bb677/glance-log/0.log" Nov 24 18:00:16 crc kubenswrapper[4760]: I1124 18:00:16.870984 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_74e069c9-8459-4455-b520-fa8ba79bb677/glance-httpd/0.log" Nov 24 18:00:17 crc kubenswrapper[4760]: I1124 18:00:17.032708 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-bc766455b-9dfnr_20fc1526-eb8d-424b-b03a-784154b5d7fa/horizon/0.log" Nov 24 18:00:17 crc kubenswrapper[4760]: I1124 18:00:17.161655 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh_2522087b-33dd-418b-abb2-813ca0f5a051/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:17 crc kubenswrapper[4760]: I1124 18:00:17.381465 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-sxvjh_a58b5d43-9b4d-4061-96e1-e02c61a4630c/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:17 crc kubenswrapper[4760]: I1124 18:00:17.422498 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-bc766455b-9dfnr_20fc1526-eb8d-424b-b03a-784154b5d7fa/horizon-log/0.log" Nov 24 18:00:17 crc kubenswrapper[4760]: I1124 18:00:17.656103 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_33972ca1-3846-487a-a8b0-fb67093b1a6d/kube-state-metrics/0.log" Nov 24 18:00:17 crc kubenswrapper[4760]: I1124 18:00:17.696072 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-5ccbbc7984-m6jkp_1fd753d3-759a-4734-96ac-9c7f1a9138fa/keystone-api/0.log" Nov 24 18:00:17 crc kubenswrapper[4760]: I1124 18:00:17.865935 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l_d89f2f80-b7b0-49b2-beab-c4fd2d17352f/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:18 crc kubenswrapper[4760]: I1124 18:00:18.270419 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7b5b8bc889-kqfhp_f4823e15-ce2c-4a16-b80e-f676469b3624/neutron-httpd/0.log" Nov 24 18:00:18 crc kubenswrapper[4760]: I1124 18:00:18.281274 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7b5b8bc889-kqfhp_f4823e15-ce2c-4a16-b80e-f676469b3624/neutron-api/0.log" Nov 24 18:00:18 crc kubenswrapper[4760]: I1124 18:00:18.523790 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7_e88df757-ac39-4a30-b0aa-eb820708e3b4/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:19 crc kubenswrapper[4760]: I1124 18:00:19.005978 4760 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_nova-cell0-conductor-0_d5b9106d-4aee-4439-9c7d-41c1f015fd02/nova-cell0-conductor-conductor/0.log" Nov 24 18:00:19 crc kubenswrapper[4760]: I1124 18:00:19.026474 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_6fcad265-b82c-400e-afce-ac2afac950d0/nova-api-log/0.log" Nov 24 18:00:19 crc kubenswrapper[4760]: I1124 18:00:19.057032 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_6fcad265-b82c-400e-afce-ac2afac950d0/nova-api-api/0.log" Nov 24 18:00:19 crc kubenswrapper[4760]: I1124 18:00:19.348408 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_61870fa4-1b0e-450c-a2a8-06d3ba20cd3e/nova-cell1-conductor-conductor/0.log" Nov 24 18:00:19 crc kubenswrapper[4760]: I1124 18:00:19.404049 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_d00b658b-a227-4c0c-9f91-d1c09d5f6173/nova-cell1-novncproxy-novncproxy/0.log" Nov 24 18:00:19 crc kubenswrapper[4760]: I1124 18:00:19.572509 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-r5fgt_cd29f6ba-13bc-4598-a031-18c0763458dc/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:19 crc kubenswrapper[4760]: I1124 18:00:19.776018 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_fa127b75-3942-4fba-815f-197979d77117/nova-metadata-log/0.log" Nov 24 18:00:20 crc kubenswrapper[4760]: I1124 18:00:20.061391 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_f54be0fa-3248-4732-b118-546367054335/nova-scheduler-scheduler/0.log" Nov 24 18:00:20 crc kubenswrapper[4760]: I1124 18:00:20.072551 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e072af6f-796e-4c4c-b7fa-a36ad7b972be/mysql-bootstrap/0.log" Nov 24 18:00:20 crc kubenswrapper[4760]: I1124 18:00:20.279470 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e072af6f-796e-4c4c-b7fa-a36ad7b972be/mysql-bootstrap/0.log" Nov 24 18:00:20 crc kubenswrapper[4760]: I1124 18:00:20.285599 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e072af6f-796e-4c4c-b7fa-a36ad7b972be/galera/0.log" Nov 24 18:00:20 crc kubenswrapper[4760]: I1124 18:00:20.464689 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ad1c45c2-91fc-4d03-9778-1f8ac8b891e5/mysql-bootstrap/0.log" Nov 24 18:00:20 crc kubenswrapper[4760]: I1124 18:00:20.723238 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ad1c45c2-91fc-4d03-9778-1f8ac8b891e5/mysql-bootstrap/0.log" Nov 24 18:00:20 crc kubenswrapper[4760]: I1124 18:00:20.787553 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ad1c45c2-91fc-4d03-9778-1f8ac8b891e5/galera/0.log" Nov 24 18:00:20 crc kubenswrapper[4760]: I1124 18:00:20.914613 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_dcc1106b-ca31-4432-948b-f01f5f47c370/openstackclient/0.log" Nov 24 18:00:20 crc kubenswrapper[4760]: I1124 18:00:20.930568 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_fa127b75-3942-4fba-815f-197979d77117/nova-metadata-metadata/0.log" Nov 24 18:00:21 crc kubenswrapper[4760]: I1124 18:00:21.090431 4760 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_ovn-controller-dl9cm_39e10c47-4e85-46de-a754-3ee0245718d7/ovn-controller/0.log" Nov 24 18:00:21 crc kubenswrapper[4760]: I1124 18:00:21.180645 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-sq2th_f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a/openstack-network-exporter/0.log" Nov 24 18:00:21 crc kubenswrapper[4760]: I1124 18:00:21.367081 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bnfkl_f9bf53ae-4ba1-4619-b603-550b974e1970/ovsdb-server-init/0.log" Nov 24 18:00:21 crc kubenswrapper[4760]: I1124 18:00:21.613389 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bnfkl_f9bf53ae-4ba1-4619-b603-550b974e1970/ovs-vswitchd/0.log" Nov 24 18:00:21 crc kubenswrapper[4760]: I1124 18:00:21.640956 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bnfkl_f9bf53ae-4ba1-4619-b603-550b974e1970/ovsdb-server/0.log" Nov 24 18:00:21 crc kubenswrapper[4760]: I1124 18:00:21.647929 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bnfkl_f9bf53ae-4ba1-4619-b603-550b974e1970/ovsdb-server-init/0.log" Nov 24 18:00:21 crc kubenswrapper[4760]: I1124 18:00:21.845252 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_53891de3-058b-46b8-b7f4-880ca70c1de3/openstack-network-exporter/0.log" Nov 24 18:00:21 crc kubenswrapper[4760]: I1124 18:00:21.894040 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-vb5mp_5e897692-730b-402f-a1a7-5f242a36fe2b/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:21 crc kubenswrapper[4760]: I1124 18:00:21.922919 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_53891de3-058b-46b8-b7f4-880ca70c1de3/ovn-northd/0.log" Nov 24 18:00:22 crc kubenswrapper[4760]: I1124 18:00:22.108272 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_20f2f51e-4902-44f9-97d6-1ebf12c22ad6/openstack-network-exporter/0.log" Nov 24 18:00:22 crc kubenswrapper[4760]: I1124 18:00:22.198454 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_20f2f51e-4902-44f9-97d6-1ebf12c22ad6/ovsdbserver-nb/0.log" Nov 24 18:00:22 crc kubenswrapper[4760]: I1124 18:00:22.350264 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b/ovsdbserver-sb/0.log" Nov 24 18:00:22 crc kubenswrapper[4760]: I1124 18:00:22.402453 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b/openstack-network-exporter/0.log" Nov 24 18:00:22 crc kubenswrapper[4760]: I1124 18:00:22.457933 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-8cf89787b-dxmqp_412c6295-ae70-4706-9c7b-88c4025c9579/placement-api/0.log" Nov 24 18:00:22 crc kubenswrapper[4760]: I1124 18:00:22.669481 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-8cf89787b-dxmqp_412c6295-ae70-4706-9c7b-88c4025c9579/placement-log/0.log" Nov 24 18:00:22 crc kubenswrapper[4760]: I1124 18:00:22.679477 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b8dd252d-07db-4037-b8c0-09ca191d9f56/setup-container/0.log" Nov 24 18:00:22 crc kubenswrapper[4760]: I1124 18:00:22.941393 4760 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8e89585e-aad9-485c-88af-2380cefb8b18/setup-container/0.log" Nov 24 18:00:22 crc kubenswrapper[4760]: I1124 18:00:22.954959 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b8dd252d-07db-4037-b8c0-09ca191d9f56/setup-container/0.log" Nov 24 18:00:23 crc kubenswrapper[4760]: I1124 18:00:23.039183 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b8dd252d-07db-4037-b8c0-09ca191d9f56/rabbitmq/0.log" Nov 24 18:00:23 crc kubenswrapper[4760]: I1124 18:00:23.176530 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8e89585e-aad9-485c-88af-2380cefb8b18/setup-container/0.log" Nov 24 18:00:23 crc kubenswrapper[4760]: I1124 18:00:23.205366 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8e89585e-aad9-485c-88af-2380cefb8b18/rabbitmq/0.log" Nov 24 18:00:23 crc kubenswrapper[4760]: I1124 18:00:23.303160 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5_7010932f-cdb9-47d9-8674-07778eda876d/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:23 crc kubenswrapper[4760]: I1124 18:00:23.417243 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-7jrgz_bcfd976b-0081-44f8-b0f4-2ca0e2372299/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:23 crc kubenswrapper[4760]: I1124 18:00:23.509582 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh_bf199ff4-4624-4608-8b45-72a6f1437473/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:23 crc kubenswrapper[4760]: I1124 18:00:23.650431 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-7hvxr_7f9d57fb-8bae-4055-aa31-d14b9cd38b62/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:23 crc kubenswrapper[4760]: I1124 18:00:23.724428 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-2jq4r_0bad4d45-bcec-460e-b393-2c8841842af8/ssh-known-hosts-edpm-deployment/0.log" Nov 24 18:00:23 crc kubenswrapper[4760]: I1124 18:00:23.932093 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-69f4488969-xwpx8_37d3f873-9ed8-47d6-b62d-3b007dca3936/proxy-server/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.065803 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-69f4488969-xwpx8_37d3f873-9ed8-47d6-b62d-3b007dca3936/proxy-httpd/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.092095 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-d9mv7_d0bcc362-6648-4630-b41b-610209865eea/swift-ring-rebalance/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.255492 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/account-auditor/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.293905 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/account-reaper/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.337779 4760 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/account-replicator/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.495662 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/container-auditor/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.522052 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/account-server/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.526278 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/container-replicator/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.529357 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/container-server/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.674919 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/container-updater/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.734470 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/object-auditor/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.761124 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/object-replicator/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.781693 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/object-expirer/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.907433 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/object-updater/0.log" Nov 24 18:00:24 crc kubenswrapper[4760]: I1124 18:00:24.910988 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/object-server/0.log" Nov 24 18:00:25 crc kubenswrapper[4760]: I1124 18:00:25.000517 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/rsync/0.log" Nov 24 18:00:25 crc kubenswrapper[4760]: I1124 18:00:25.060996 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/swift-recon-cron/0.log" Nov 24 18:00:25 crc kubenswrapper[4760]: I1124 18:00:25.198902 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q_54204c3b-38f8-4e55-a645-b8c60b762c89/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:25 crc kubenswrapper[4760]: I1124 18:00:25.342094 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01/tempest-tests-tempest-tests-runner/0.log" Nov 24 18:00:25 crc kubenswrapper[4760]: I1124 18:00:25.509253 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_d89a1dea-6dac-4fd1-bb44-55076bd67fba/test-operator-logs-container/0.log" Nov 24 18:00:25 crc kubenswrapper[4760]: I1124 18:00:25.634260 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-fx279_a548ab89-b523-4f50-b490-7470e05662b6/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:00:33 crc kubenswrapper[4760]: I1124 18:00:33.500133 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_5645a4cb-e092-4b3f-a704-c3497f304e80/memcached/0.log" Nov 24 18:00:34 crc kubenswrapper[4760]: I1124 18:00:34.019802 4760 scope.go:117] "RemoveContainer" containerID="5b8ec8de9bfca3b92ae9a7c10d4576c96506bf9e28d6e5a320584102ea6835ca" Nov 24 18:00:35 crc kubenswrapper[4760]: I1124 18:00:35.642749 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:00:35 crc kubenswrapper[4760]: I1124 18:00:35.643003 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:00:48 crc kubenswrapper[4760]: I1124 18:00:48.659405 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-f97qw_c07ab946-dbd4-4fbf-b17c-7bfa133e1c96/kube-rbac-proxy/0.log" Nov 24 18:00:48 crc kubenswrapper[4760]: I1124 18:00:48.708242 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-f97qw_c07ab946-dbd4-4fbf-b17c-7bfa133e1c96/manager/0.log" Nov 24 18:00:48 crc kubenswrapper[4760]: I1124 18:00:48.799691 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-9fscr_8a48d8a2-3c00-4a6e-b88f-dab093355874/kube-rbac-proxy/0.log" Nov 24 18:00:48 crc kubenswrapper[4760]: I1124 18:00:48.888440 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-9fscr_8a48d8a2-3c00-4a6e-b88f-dab093355874/manager/0.log" Nov 24 18:00:48 crc kubenswrapper[4760]: I1124 18:00:48.987699 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-6tfrh_df86f3d1-75ea-4757-8115-1440d92160b6/manager/0.log" Nov 24 18:00:48 crc kubenswrapper[4760]: I1124 18:00:48.998133 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-6tfrh_df86f3d1-75ea-4757-8115-1440d92160b6/kube-rbac-proxy/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.152158 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/util/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.283153 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/pull/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.284404 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/pull/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.298318 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/util/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.475715 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/extract/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.479498 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/util/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.489054 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/pull/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.684623 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-znhd6_e3c878c9-0549-4e8b-bb1a-2754b8a8d402/kube-rbac-proxy/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.720786 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-dm4k7_abd30b3d-1e1d-4a1d-b4b6-aaf500949015/kube-rbac-proxy/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.748331 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-znhd6_e3c878c9-0549-4e8b-bb1a-2754b8a8d402/manager/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.899660 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-dm4k7_abd30b3d-1e1d-4a1d-b4b6-aaf500949015/manager/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.913529 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-95w5b_981e3771-3dd1-4e3d-9601-7c16bbc22c8f/kube-rbac-proxy/0.log" Nov 24 18:00:49 crc kubenswrapper[4760]: I1124 18:00:49.952629 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-95w5b_981e3771-3dd1-4e3d-9601-7c16bbc22c8f/manager/0.log" Nov 24 18:00:50 crc kubenswrapper[4760]: I1124 18:00:50.149333 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-jxvzz_11f165ab-07bd-46ce-ad35-5b349c9b16be/kube-rbac-proxy/0.log" Nov 24 18:00:50 crc kubenswrapper[4760]: I1124 18:00:50.286701 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-jxvzz_11f165ab-07bd-46ce-ad35-5b349c9b16be/manager/0.log" Nov 24 18:00:50 crc kubenswrapper[4760]: I1124 18:00:50.329112 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-zd54m_93232e72-070f-4a46-89da-983cd8abe0b5/kube-rbac-proxy/0.log" Nov 24 18:00:50 crc kubenswrapper[4760]: I1124 18:00:50.407176 
4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-zd54m_93232e72-070f-4a46-89da-983cd8abe0b5/manager/0.log" Nov 24 18:00:50 crc kubenswrapper[4760]: I1124 18:00:50.481212 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-wgd79_c0da29f6-094e-499d-90ea-93ddfe52e165/kube-rbac-proxy/0.log" Nov 24 18:00:50 crc kubenswrapper[4760]: I1124 18:00:50.574047 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-wgd79_c0da29f6-094e-499d-90ea-93ddfe52e165/manager/0.log" Nov 24 18:00:50 crc kubenswrapper[4760]: I1124 18:00:50.629260 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-87pfs_d7eea786-ecee-41f0-9a52-7ac9bef2f874/kube-rbac-proxy/0.log" Nov 24 18:00:50 crc kubenswrapper[4760]: I1124 18:00:50.731932 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-87pfs_d7eea786-ecee-41f0-9a52-7ac9bef2f874/manager/0.log" Nov 24 18:00:50 crc kubenswrapper[4760]: I1124 18:00:50.847363 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-fl68l_a29a1da0-a007-4d2d-8ca2-0a3f78e4d995/kube-rbac-proxy/0.log" Nov 24 18:00:50 crc kubenswrapper[4760]: I1124 18:00:50.858244 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-fl68l_a29a1da0-a007-4d2d-8ca2-0a3f78e4d995/manager/0.log" Nov 24 18:00:51 crc kubenswrapper[4760]: I1124 18:00:51.017225 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-j6f4z_dc1dfda1-793b-4b06-a228-0e5472915f76/kube-rbac-proxy/0.log" Nov 24 18:00:51 crc kubenswrapper[4760]: I1124 18:00:51.075700 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-j6f4z_dc1dfda1-793b-4b06-a228-0e5472915f76/manager/0.log" Nov 24 18:00:51 crc kubenswrapper[4760]: I1124 18:00:51.165134 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-r7dzv_51bd5ae4-002b-40c4-bd9e-b6d087bfdaba/kube-rbac-proxy/0.log" Nov 24 18:00:51 crc kubenswrapper[4760]: I1124 18:00:51.271653 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-r7dzv_51bd5ae4-002b-40c4-bd9e-b6d087bfdaba/manager/0.log" Nov 24 18:00:51 crc kubenswrapper[4760]: I1124 18:00:51.338309 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-hn7wz_f43007f0-7615-44a1-8594-dd0b0adbded6/kube-rbac-proxy/0.log" Nov 24 18:00:51 crc kubenswrapper[4760]: I1124 18:00:51.394783 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-hn7wz_f43007f0-7615-44a1-8594-dd0b0adbded6/manager/0.log" Nov 24 18:00:51 crc kubenswrapper[4760]: I1124 18:00:51.502671 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm_2ebc4c96-b0e9-4f9f-950b-5af42b867a8a/kube-rbac-proxy/0.log" Nov 24 18:00:51 crc 
kubenswrapper[4760]: I1124 18:00:51.526576 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm_2ebc4c96-b0e9-4f9f-950b-5af42b867a8a/manager/0.log" Nov 24 18:00:51 crc kubenswrapper[4760]: I1124 18:00:51.701182 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-d5d9ddcff-zjhwp_ca3d8449-fd16-491b-bd2e-06dcd9103bdf/kube-rbac-proxy/0.log" Nov 24 18:00:51 crc kubenswrapper[4760]: I1124 18:00:51.847387 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-d5ff89cd9-mtcgb_20782ce3-a28a-4fa7-a4c1-ae186c4e9f44/kube-rbac-proxy/0.log" Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.106911 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-d5ff89cd9-mtcgb_20782ce3-a28a-4fa7-a4c1-ae186c4e9f44/operator/0.log" Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.107592 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-7jdkl_baed48fd-5a3a-482e-a24e-2aff550b63dc/registry-server/0.log" Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.295155 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-tjtzh_3ead61e1-d87a-44bb-8144-3198f06976c4/kube-rbac-proxy/0.log" Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.420035 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-tjtzh_3ead61e1-d87a-44bb-8144-3198f06976c4/manager/0.log" Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.543973 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dqk42"] Nov 24 18:00:52 crc kubenswrapper[4760]: E1124 18:00:52.544646 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f393ce56-8fdb-4607-b5d3-d0e543a118c9" containerName="collect-profiles" Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.544732 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="f393ce56-8fdb-4607-b5d3-d0e543a118c9" containerName="collect-profiles" Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.545050 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="f393ce56-8fdb-4607-b5d3-d0e543a118c9" containerName="collect-profiles" Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.546622 4760 util.go:30] "No sandbox for pod can be found. 
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.570558 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-9cxcs_b075e65d-1bff-4853-9f78-339a20dde0d8/kube-rbac-proxy/0.log"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.576108 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dqk42"]
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.708146 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f85tx\" (UniqueName: \"kubernetes.io/projected/4f32d028-b48f-4821-aa8c-c5844212aa5a-kube-api-access-f85tx\") pod \"community-operators-dqk42\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " pod="openshift-marketplace/community-operators-dqk42"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.708232 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-catalog-content\") pod \"community-operators-dqk42\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " pod="openshift-marketplace/community-operators-dqk42"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.708379 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-utilities\") pod \"community-operators-dqk42\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " pod="openshift-marketplace/community-operators-dqk42"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.745211 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-9cxcs_b075e65d-1bff-4853-9f78-339a20dde0d8/manager/0.log"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.810170 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f85tx\" (UniqueName: \"kubernetes.io/projected/4f32d028-b48f-4821-aa8c-c5844212aa5a-kube-api-access-f85tx\") pod \"community-operators-dqk42\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " pod="openshift-marketplace/community-operators-dqk42"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.810259 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-catalog-content\") pod \"community-operators-dqk42\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " pod="openshift-marketplace/community-operators-dqk42"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.810423 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-utilities\") pod \"community-operators-dqk42\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " pod="openshift-marketplace/community-operators-dqk42"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.811383 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-catalog-content\") pod \"community-operators-dqk42\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " pod="openshift-marketplace/community-operators-dqk42"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.811544 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-utilities\") pod \"community-operators-dqk42\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " pod="openshift-marketplace/community-operators-dqk42"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.830640 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f85tx\" (UniqueName: \"kubernetes.io/projected/4f32d028-b48f-4821-aa8c-c5844212aa5a-kube-api-access-f85tx\") pod \"community-operators-dqk42\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " pod="openshift-marketplace/community-operators-dqk42"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.881486 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dqk42"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.882019 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-b26r2_df2ff3b3-46c0-4a51-bac9-e19df21c24fa/operator/0.log"
Nov 24 18:00:52 crc kubenswrapper[4760]: I1124 18:00:52.974702 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-d5d9ddcff-zjhwp_ca3d8449-fd16-491b-bd2e-06dcd9103bdf/manager/0.log"
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.209586 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-mnszq_43e5759b-21f0-45be-a96b-c0c86229273f/kube-rbac-proxy/0.log"
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.289891 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-mnszq_43e5759b-21f0-45be-a96b-c0c86229273f/manager/0.log"
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.447562 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54d7678447-gcrcj_12583812-acca-4939-9358-17b4bb668450/kube-rbac-proxy/0.log"
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.540378 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dqk42"]
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.549282 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54d7678447-gcrcj_12583812-acca-4939-9358-17b4bb668450/manager/0.log"
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.556681 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-qn926_5b5f6f3c-636d-4507-8c3d-51c1ac4693d6/kube-rbac-proxy/0.log"
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.669620 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-qn926_5b5f6f3c-636d-4507-8c3d-51c1ac4693d6/manager/0.log"
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.717883 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-8c6448b9f-8jmvq_6a3853ba-f14b-4d13-96c5-7b7a590086ca/kube-rbac-proxy/0.log"
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.795795 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-8c6448b9f-8jmvq_6a3853ba-f14b-4d13-96c5-7b7a590086ca/manager/0.log"
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.999614 4760 generic.go:334] "Generic (PLEG): container finished" podID="4f32d028-b48f-4821-aa8c-c5844212aa5a" containerID="7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c" exitCode=0
Nov 24 18:00:53 crc kubenswrapper[4760]: I1124 18:00:53.999682 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dqk42" event={"ID":"4f32d028-b48f-4821-aa8c-c5844212aa5a","Type":"ContainerDied","Data":"7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c"}
Nov 24 18:00:54 crc kubenswrapper[4760]: I1124 18:00:53.999708 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dqk42" event={"ID":"4f32d028-b48f-4821-aa8c-c5844212aa5a","Type":"ContainerStarted","Data":"51e9c4d4c8ddf234021d486905d33bbc8ae881fb02d0753cb55768e7d1702849"}
Nov 24 18:00:55 crc kubenswrapper[4760]: I1124 18:00:55.009713 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dqk42" event={"ID":"4f32d028-b48f-4821-aa8c-c5844212aa5a","Type":"ContainerStarted","Data":"49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338"}
Nov 24 18:00:56 crc kubenswrapper[4760]: I1124 18:00:56.021321 4760 generic.go:334] "Generic (PLEG): container finished" podID="4f32d028-b48f-4821-aa8c-c5844212aa5a" containerID="49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338" exitCode=0
Nov 24 18:00:56 crc kubenswrapper[4760]: I1124 18:00:56.021363 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dqk42" event={"ID":"4f32d028-b48f-4821-aa8c-c5844212aa5a","Type":"ContainerDied","Data":"49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338"}
Nov 24 18:00:57 crc kubenswrapper[4760]: I1124 18:00:57.037639 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dqk42" event={"ID":"4f32d028-b48f-4821-aa8c-c5844212aa5a","Type":"ContainerStarted","Data":"7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28"}
Nov 24 18:00:57 crc kubenswrapper[4760]: I1124 18:00:57.061621 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dqk42" podStartSLOduration=2.62251662 podStartE2EDuration="5.061605781s" podCreationTimestamp="2025-11-24 18:00:52 +0000 UTC" firstStartedPulling="2025-11-24 18:00:54.001380249 +0000 UTC m=+3449.324261799" lastFinishedPulling="2025-11-24 18:00:56.44046941 +0000 UTC m=+3451.763350960" observedRunningTime="2025-11-24 18:00:57.058380798 +0000 UTC m=+3452.381262348" watchObservedRunningTime="2025-11-24 18:00:57.061605781 +0000 UTC m=+3452.384487331"
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.148694 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29400121-ttct7"]
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.151260 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29400121-ttct7"
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.158813 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29400121-ttct7"]
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.250682 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqm4s\" (UniqueName: \"kubernetes.io/projected/984bae89-5644-4859-ab94-6f00104349eb-kube-api-access-jqm4s\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7"
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.250781 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-fernet-keys\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7"
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.250947 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-combined-ca-bundle\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7"
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.251125 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-config-data\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7"
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.352762 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-config-data\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7"
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.352857 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqm4s\" (UniqueName: \"kubernetes.io/projected/984bae89-5644-4859-ab94-6f00104349eb-kube-api-access-jqm4s\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7"
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.352905 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-fernet-keys\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7"
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.352941 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-combined-ca-bundle\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7"
Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.361965 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-fernet-keys\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7"
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-fernet-keys\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7" Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.362030 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-config-data\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7" Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.363524 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-combined-ca-bundle\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7" Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.371392 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqm4s\" (UniqueName: \"kubernetes.io/projected/984bae89-5644-4859-ab94-6f00104349eb-kube-api-access-jqm4s\") pod \"keystone-cron-29400121-ttct7\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " pod="openstack/keystone-cron-29400121-ttct7" Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.473525 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29400121-ttct7" Nov 24 18:01:00 crc kubenswrapper[4760]: I1124 18:01:00.969488 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29400121-ttct7"] Nov 24 18:01:01 crc kubenswrapper[4760]: I1124 18:01:01.081194 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29400121-ttct7" event={"ID":"984bae89-5644-4859-ab94-6f00104349eb","Type":"ContainerStarted","Data":"f89bead7c0e16365e5237d27336f41b4796a6ab43a1c65e249a0b6297a2df8a4"} Nov 24 18:01:02 crc kubenswrapper[4760]: I1124 18:01:02.090071 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29400121-ttct7" event={"ID":"984bae89-5644-4859-ab94-6f00104349eb","Type":"ContainerStarted","Data":"f5d7f19c64126f606bdafe413c75c9af3f57f7871095e1e50d220e4f46af4aa2"} Nov 24 18:01:02 crc kubenswrapper[4760]: I1124 18:01:02.114765 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29400121-ttct7" podStartSLOduration=2.114744115 podStartE2EDuration="2.114744115s" podCreationTimestamp="2025-11-24 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:01:02.107284701 +0000 UTC m=+3457.430166251" watchObservedRunningTime="2025-11-24 18:01:02.114744115 +0000 UTC m=+3457.437625665" Nov 24 18:01:02 crc kubenswrapper[4760]: I1124 18:01:02.882669 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dqk42" Nov 24 18:01:02 crc kubenswrapper[4760]: I1124 18:01:02.883060 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dqk42" Nov 24 18:01:02 crc kubenswrapper[4760]: I1124 18:01:02.933578 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dqk42" Nov 24 18:01:03 crc kubenswrapper[4760]: 
I1124 18:01:03.159551 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dqk42" Nov 24 18:01:03 crc kubenswrapper[4760]: I1124 18:01:03.203804 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dqk42"] Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.114754 4760 generic.go:334] "Generic (PLEG): container finished" podID="984bae89-5644-4859-ab94-6f00104349eb" containerID="f5d7f19c64126f606bdafe413c75c9af3f57f7871095e1e50d220e4f46af4aa2" exitCode=0 Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.114844 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29400121-ttct7" event={"ID":"984bae89-5644-4859-ab94-6f00104349eb","Type":"ContainerDied","Data":"f5d7f19c64126f606bdafe413c75c9af3f57f7871095e1e50d220e4f46af4aa2"} Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.115257 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dqk42" podUID="4f32d028-b48f-4821-aa8c-c5844212aa5a" containerName="registry-server" containerID="cri-o://7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28" gracePeriod=2 Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.573188 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dqk42" Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.643647 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.643980 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.669065 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-utilities\") pod \"4f32d028-b48f-4821-aa8c-c5844212aa5a\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.669266 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f85tx\" (UniqueName: \"kubernetes.io/projected/4f32d028-b48f-4821-aa8c-c5844212aa5a-kube-api-access-f85tx\") pod \"4f32d028-b48f-4821-aa8c-c5844212aa5a\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.669286 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-catalog-content\") pod \"4f32d028-b48f-4821-aa8c-c5844212aa5a\" (UID: \"4f32d028-b48f-4821-aa8c-c5844212aa5a\") " Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.670053 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-utilities" (OuterVolumeSpecName: "utilities") pod 
"4f32d028-b48f-4821-aa8c-c5844212aa5a" (UID: "4f32d028-b48f-4821-aa8c-c5844212aa5a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.675211 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f32d028-b48f-4821-aa8c-c5844212aa5a-kube-api-access-f85tx" (OuterVolumeSpecName: "kube-api-access-f85tx") pod "4f32d028-b48f-4821-aa8c-c5844212aa5a" (UID: "4f32d028-b48f-4821-aa8c-c5844212aa5a"). InnerVolumeSpecName "kube-api-access-f85tx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.719961 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4f32d028-b48f-4821-aa8c-c5844212aa5a" (UID: "4f32d028-b48f-4821-aa8c-c5844212aa5a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.771071 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f85tx\" (UniqueName: \"kubernetes.io/projected/4f32d028-b48f-4821-aa8c-c5844212aa5a-kube-api-access-f85tx\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.771117 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:05 crc kubenswrapper[4760]: I1124 18:01:05.771129 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f32d028-b48f-4821-aa8c-c5844212aa5a-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.126621 4760 generic.go:334] "Generic (PLEG): container finished" podID="4f32d028-b48f-4821-aa8c-c5844212aa5a" containerID="7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28" exitCode=0 Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.126672 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dqk42" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.126769 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dqk42" event={"ID":"4f32d028-b48f-4821-aa8c-c5844212aa5a","Type":"ContainerDied","Data":"7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28"} Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.126802 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dqk42" event={"ID":"4f32d028-b48f-4821-aa8c-c5844212aa5a","Type":"ContainerDied","Data":"51e9c4d4c8ddf234021d486905d33bbc8ae881fb02d0753cb55768e7d1702849"} Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.126821 4760 scope.go:117] "RemoveContainer" containerID="7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.163943 4760 scope.go:117] "RemoveContainer" containerID="49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.168806 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dqk42"] Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.178406 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dqk42"] Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.190387 4760 scope.go:117] "RemoveContainer" containerID="7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.266611 4760 scope.go:117] "RemoveContainer" containerID="7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28" Nov 24 18:01:06 crc kubenswrapper[4760]: E1124 18:01:06.268152 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28\": container with ID starting with 7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28 not found: ID does not exist" containerID="7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.268201 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28"} err="failed to get container status \"7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28\": rpc error: code = NotFound desc = could not find container \"7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28\": container with ID starting with 7c86699e8483525a828be6aa4cd984d55db11e3e60460d03e0acc33510af2d28 not found: ID does not exist" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.268231 4760 scope.go:117] "RemoveContainer" containerID="49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338" Nov 24 18:01:06 crc kubenswrapper[4760]: E1124 18:01:06.268523 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338\": container with ID starting with 49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338 not found: ID does not exist" containerID="49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.268543 4760 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338"} err="failed to get container status \"49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338\": rpc error: code = NotFound desc = could not find container \"49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338\": container with ID starting with 49fb2bb1acef351862025d5d94ba1bad978fc4bf4c026f23d08e89794808f338 not found: ID does not exist" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.268560 4760 scope.go:117] "RemoveContainer" containerID="7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c" Nov 24 18:01:06 crc kubenswrapper[4760]: E1124 18:01:06.270136 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c\": container with ID starting with 7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c not found: ID does not exist" containerID="7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.270158 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c"} err="failed to get container status \"7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c\": rpc error: code = NotFound desc = could not find container \"7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c\": container with ID starting with 7b9e45026ff0c8d1dd55e2cd2ceba9fd584f10400043f417662c6db5b2e28a8c not found: ID does not exist" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.518755 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29400121-ttct7" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.582882 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-combined-ca-bundle\") pod \"984bae89-5644-4859-ab94-6f00104349eb\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.582994 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-config-data\") pod \"984bae89-5644-4859-ab94-6f00104349eb\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.583042 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-fernet-keys\") pod \"984bae89-5644-4859-ab94-6f00104349eb\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.583159 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqm4s\" (UniqueName: \"kubernetes.io/projected/984bae89-5644-4859-ab94-6f00104349eb-kube-api-access-jqm4s\") pod \"984bae89-5644-4859-ab94-6f00104349eb\" (UID: \"984bae89-5644-4859-ab94-6f00104349eb\") " Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.595865 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "984bae89-5644-4859-ab94-6f00104349eb" (UID: "984bae89-5644-4859-ab94-6f00104349eb"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.595876 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/984bae89-5644-4859-ab94-6f00104349eb-kube-api-access-jqm4s" (OuterVolumeSpecName: "kube-api-access-jqm4s") pod "984bae89-5644-4859-ab94-6f00104349eb" (UID: "984bae89-5644-4859-ab94-6f00104349eb"). InnerVolumeSpecName "kube-api-access-jqm4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.614368 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "984bae89-5644-4859-ab94-6f00104349eb" (UID: "984bae89-5644-4859-ab94-6f00104349eb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.642831 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-config-data" (OuterVolumeSpecName: "config-data") pod "984bae89-5644-4859-ab94-6f00104349eb" (UID: "984bae89-5644-4859-ab94-6f00104349eb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.686788 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqm4s\" (UniqueName: \"kubernetes.io/projected/984bae89-5644-4859-ab94-6f00104349eb-kube-api-access-jqm4s\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.687109 4760 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.687120 4760 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:06 crc kubenswrapper[4760]: I1124 18:01:06.687129 4760 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/984bae89-5644-4859-ab94-6f00104349eb-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 24 18:01:07 crc kubenswrapper[4760]: I1124 18:01:07.136741 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29400121-ttct7" event={"ID":"984bae89-5644-4859-ab94-6f00104349eb","Type":"ContainerDied","Data":"f89bead7c0e16365e5237d27336f41b4796a6ab43a1c65e249a0b6297a2df8a4"} Nov 24 18:01:07 crc kubenswrapper[4760]: I1124 18:01:07.136788 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f89bead7c0e16365e5237d27336f41b4796a6ab43a1c65e249a0b6297a2df8a4" Nov 24 18:01:07 crc kubenswrapper[4760]: I1124 18:01:07.136801 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29400121-ttct7" Nov 24 18:01:07 crc kubenswrapper[4760]: I1124 18:01:07.476533 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f32d028-b48f-4821-aa8c-c5844212aa5a" path="/var/lib/kubelet/pods/4f32d028-b48f-4821-aa8c-c5844212aa5a/volumes" Nov 24 18:01:10 crc kubenswrapper[4760]: I1124 18:01:10.471093 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-sldtw_a67e9663-0794-412b-b976-c0c50f39184e/control-plane-machine-set-operator/0.log" Nov 24 18:01:10 crc kubenswrapper[4760]: I1124 18:01:10.643274 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rt9dn_843e455c-4df4-4e25-91f1-456b61889db5/machine-api-operator/0.log" Nov 24 18:01:10 crc kubenswrapper[4760]: I1124 18:01:10.656606 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rt9dn_843e455c-4df4-4e25-91f1-456b61889db5/kube-rbac-proxy/0.log" Nov 24 18:01:22 crc kubenswrapper[4760]: I1124 18:01:22.142815 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-xtps2_bc25f619-5720-41cd-9fe6-beb030debe00/cert-manager-controller/0.log" Nov 24 18:01:22 crc kubenswrapper[4760]: I1124 18:01:22.306386 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-qn2xj_e64b9328-142b-47be-a2f9-9c2339244683/cert-manager-cainjector/0.log" Nov 24 18:01:22 crc kubenswrapper[4760]: I1124 18:01:22.348163 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-dcdmq_32a75cd6-7dcc-409b-9208-86578c121ec7/cert-manager-webhook/0.log" Nov 24 18:01:33 crc kubenswrapper[4760]: I1124 18:01:33.696540 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-w6tj7_5228db69-23c1-48fa-a89f-a4e0459bcdec/nmstate-console-plugin/0.log" Nov 24 18:01:33 crc kubenswrapper[4760]: I1124 18:01:33.847399 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-nl86x_fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3/nmstate-handler/0.log" Nov 24 18:01:33 crc kubenswrapper[4760]: I1124 18:01:33.914017 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-tgfj2_c02c0ef6-ec2f-4554-89d5-95ccd5a9af05/kube-rbac-proxy/0.log" Nov 24 18:01:33 crc kubenswrapper[4760]: I1124 18:01:33.931472 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-tgfj2_c02c0ef6-ec2f-4554-89d5-95ccd5a9af05/nmstate-metrics/0.log" Nov 24 18:01:34 crc kubenswrapper[4760]: I1124 18:01:34.040653 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-zn9lc_60a6fd21-1e4a-4eab-940a-157de6e7236e/nmstate-operator/0.log" Nov 24 18:01:34 crc kubenswrapper[4760]: I1124 18:01:34.132240 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-dgm2z_2a8eb5c7-a2fa-4029-9d10-9ef82f358506/nmstate-webhook/0.log" Nov 24 18:01:35 crc kubenswrapper[4760]: I1124 18:01:35.643053 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:01:35 crc kubenswrapper[4760]: I1124 18:01:35.643323 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:01:35 crc kubenswrapper[4760]: I1124 18:01:35.643356 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 18:01:35 crc kubenswrapper[4760]: I1124 18:01:35.645218 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"068cd700a48609c485da1623d466fe38a9ca4d92ad19e40e3c85a4e6d8a2c01b"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 18:01:35 crc kubenswrapper[4760]: I1124 18:01:35.645312 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://068cd700a48609c485da1623d466fe38a9ca4d92ad19e40e3c85a4e6d8a2c01b" gracePeriod=600 Nov 24 18:01:36 crc kubenswrapper[4760]: I1124 18:01:36.374188 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" 
containerID="068cd700a48609c485da1623d466fe38a9ca4d92ad19e40e3c85a4e6d8a2c01b" exitCode=0 Nov 24 18:01:36 crc kubenswrapper[4760]: I1124 18:01:36.374767 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"068cd700a48609c485da1623d466fe38a9ca4d92ad19e40e3c85a4e6d8a2c01b"} Nov 24 18:01:36 crc kubenswrapper[4760]: I1124 18:01:36.374905 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b"} Nov 24 18:01:36 crc kubenswrapper[4760]: I1124 18:01:36.375052 4760 scope.go:117] "RemoveContainer" containerID="0271ba869ed865a70f87d1af558f937693ca5223233a47505af94b17d36de03f" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.201203 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-r22lk_b2d37ad6-a6ac-4c40-82e2-4eb9319e9244/kube-rbac-proxy/0.log" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.289198 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-r22lk_b2d37ad6-a6ac-4c40-82e2-4eb9319e9244/controller/0.log" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.413760 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-frr-files/0.log" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.623494 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-reloader/0.log" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.649468 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-metrics/0.log" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.656237 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-reloader/0.log" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.670990 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-frr-files/0.log" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.797983 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-frr-files/0.log" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.828491 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-metrics/0.log" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.852846 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-metrics/0.log" Nov 24 18:01:47 crc kubenswrapper[4760]: I1124 18:01:47.880923 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-reloader/0.log" Nov 24 18:01:48 crc kubenswrapper[4760]: I1124 18:01:48.046801 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-frr-files/0.log" Nov 24 18:01:48 crc 
kubenswrapper[4760]: I1124 18:01:48.072545 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-metrics/0.log" Nov 24 18:01:48 crc kubenswrapper[4760]: I1124 18:01:48.073047 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/controller/0.log" Nov 24 18:01:48 crc kubenswrapper[4760]: I1124 18:01:48.104301 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-reloader/0.log" Nov 24 18:01:48 crc kubenswrapper[4760]: I1124 18:01:48.280782 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/frr-metrics/0.log" Nov 24 18:01:48 crc kubenswrapper[4760]: I1124 18:01:48.301066 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/kube-rbac-proxy/0.log" Nov 24 18:01:48 crc kubenswrapper[4760]: I1124 18:01:48.363352 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/kube-rbac-proxy-frr/0.log" Nov 24 18:01:48 crc kubenswrapper[4760]: I1124 18:01:48.510859 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/reloader/0.log" Nov 24 18:01:48 crc kubenswrapper[4760]: I1124 18:01:48.610494 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-qk694_858bafc0-44a3-4e65-9a8f-0da3e8d6f624/frr-k8s-webhook-server/0.log" Nov 24 18:01:48 crc kubenswrapper[4760]: I1124 18:01:48.839770 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6cbb78f8d9-xsz89_9608978e-3402-4aa6-97aa-c15d47a81890/manager/0.log" Nov 24 18:01:48 crc kubenswrapper[4760]: I1124 18:01:48.951335 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-558dddbf45-rtbw7_9c952901-3384-4ff2-a54a-28b709c934a7/webhook-server/0.log" Nov 24 18:01:49 crc kubenswrapper[4760]: I1124 18:01:49.029907 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-z7h4x_576e8cbe-2d96-43c5-a62c-d4f22abdc21a/kube-rbac-proxy/0.log" Nov 24 18:01:49 crc kubenswrapper[4760]: I1124 18:01:49.529396 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/frr/0.log" Nov 24 18:01:49 crc kubenswrapper[4760]: I1124 18:01:49.605887 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-z7h4x_576e8cbe-2d96-43c5-a62c-d4f22abdc21a/speaker/0.log" Nov 24 18:02:01 crc kubenswrapper[4760]: I1124 18:02:01.627238 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/util/0.log" Nov 24 18:02:01 crc kubenswrapper[4760]: I1124 18:02:01.785037 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/pull/0.log" Nov 24 18:02:01 crc kubenswrapper[4760]: I1124 18:02:01.802411 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/util/0.log" Nov 24 18:02:01 crc kubenswrapper[4760]: I1124 18:02:01.863303 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/pull/0.log" Nov 24 18:02:02 crc kubenswrapper[4760]: I1124 18:02:02.111606 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/pull/0.log" Nov 24 18:02:02 crc kubenswrapper[4760]: I1124 18:02:02.164826 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/extract/0.log" Nov 24 18:02:02 crc kubenswrapper[4760]: I1124 18:02:02.165202 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/util/0.log" Nov 24 18:02:02 crc kubenswrapper[4760]: I1124 18:02:02.303135 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-utilities/0.log" Nov 24 18:02:02 crc kubenswrapper[4760]: I1124 18:02:02.558917 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-utilities/0.log" Nov 24 18:02:02 crc kubenswrapper[4760]: I1124 18:02:02.602688 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-content/0.log" Nov 24 18:02:02 crc kubenswrapper[4760]: I1124 18:02:02.609870 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-content/0.log" Nov 24 18:02:02 crc kubenswrapper[4760]: I1124 18:02:02.847398 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-utilities/0.log" Nov 24 18:02:02 crc kubenswrapper[4760]: I1124 18:02:02.961255 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-content/0.log" Nov 24 18:02:03 crc kubenswrapper[4760]: I1124 18:02:03.144781 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-utilities/0.log" Nov 24 18:02:03 crc kubenswrapper[4760]: I1124 18:02:03.152135 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/registry-server/0.log" Nov 24 18:02:03 crc kubenswrapper[4760]: I1124 18:02:03.305942 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-utilities/0.log" Nov 24 18:02:03 crc kubenswrapper[4760]: I1124 18:02:03.322258 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-content/0.log" Nov 24 18:02:03 crc kubenswrapper[4760]: I1124 18:02:03.345471 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-content/0.log" Nov 24 18:02:03 crc kubenswrapper[4760]: I1124 18:02:03.522447 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-utilities/0.log" Nov 24 18:02:03 crc kubenswrapper[4760]: I1124 18:02:03.576769 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-content/0.log" Nov 24 18:02:03 crc kubenswrapper[4760]: I1124 18:02:03.742621 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/util/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.110041 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/util/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.134980 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/pull/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.139390 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/registry-server/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.186286 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/pull/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.466321 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/extract/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.523444 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/util/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.534160 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/pull/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.672291 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-jfn9q_3947261e-1d34-46c9-a769-f71d6e03f7d1/marketplace-operator/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.760893 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-utilities/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.893418 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-utilities/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.938541 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-content/0.log" Nov 24 18:02:04 crc kubenswrapper[4760]: I1124 18:02:04.965330 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-content/0.log" Nov 24 18:02:05 crc kubenswrapper[4760]: I1124 18:02:05.106553 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-content/0.log" Nov 24 18:02:05 crc kubenswrapper[4760]: I1124 18:02:05.143819 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-utilities/0.log" Nov 24 18:02:05 crc kubenswrapper[4760]: I1124 18:02:05.234704 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/registry-server/0.log" Nov 24 18:02:05 crc kubenswrapper[4760]: I1124 18:02:05.315691 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-utilities/0.log" Nov 24 18:02:05 crc kubenswrapper[4760]: I1124 18:02:05.544954 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-utilities/0.log" Nov 24 18:02:05 crc kubenswrapper[4760]: I1124 18:02:05.560779 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-content/0.log" Nov 24 18:02:05 crc kubenswrapper[4760]: I1124 18:02:05.573628 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-content/0.log" Nov 24 18:02:05 crc kubenswrapper[4760]: I1124 18:02:05.790210 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-content/0.log" Nov 24 18:02:05 crc kubenswrapper[4760]: I1124 18:02:05.809137 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-utilities/0.log" Nov 24 18:02:06 crc kubenswrapper[4760]: I1124 18:02:06.218749 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/registry-server/0.log" Nov 24 18:02:22 crc kubenswrapper[4760]: E1124 18:02:22.407826 4760 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.230:44784->38.102.83.230:45313: read tcp 38.102.83.230:44784->38.102.83.230:45313: read: connection reset by peer Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.542486 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xnf9s"] Nov 24 18:02:44 crc kubenswrapper[4760]: E1124 18:02:44.543359 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f32d028-b48f-4821-aa8c-c5844212aa5a" 
containerName="registry-server" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.543385 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f32d028-b48f-4821-aa8c-c5844212aa5a" containerName="registry-server" Nov 24 18:02:44 crc kubenswrapper[4760]: E1124 18:02:44.543420 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f32d028-b48f-4821-aa8c-c5844212aa5a" containerName="extract-content" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.543426 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f32d028-b48f-4821-aa8c-c5844212aa5a" containerName="extract-content" Nov 24 18:02:44 crc kubenswrapper[4760]: E1124 18:02:44.543439 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="984bae89-5644-4859-ab94-6f00104349eb" containerName="keystone-cron" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.543445 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="984bae89-5644-4859-ab94-6f00104349eb" containerName="keystone-cron" Nov 24 18:02:44 crc kubenswrapper[4760]: E1124 18:02:44.543462 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f32d028-b48f-4821-aa8c-c5844212aa5a" containerName="extract-utilities" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.543468 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f32d028-b48f-4821-aa8c-c5844212aa5a" containerName="extract-utilities" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.543698 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f32d028-b48f-4821-aa8c-c5844212aa5a" containerName="registry-server" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.543711 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="984bae89-5644-4859-ab94-6f00104349eb" containerName="keystone-cron" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.545307 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.553352 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xnf9s"] Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.669492 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-utilities\") pod \"certified-operators-xnf9s\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.669659 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md2tx\" (UniqueName: \"kubernetes.io/projected/fbad2b6d-0634-4bf4-8217-37985bba54fb-kube-api-access-md2tx\") pod \"certified-operators-xnf9s\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.669703 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-catalog-content\") pod \"certified-operators-xnf9s\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.771180 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md2tx\" (UniqueName: \"kubernetes.io/projected/fbad2b6d-0634-4bf4-8217-37985bba54fb-kube-api-access-md2tx\") pod \"certified-operators-xnf9s\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.771491 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-catalog-content\") pod \"certified-operators-xnf9s\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.771576 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-utilities\") pod \"certified-operators-xnf9s\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.771952 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-catalog-content\") pod \"certified-operators-xnf9s\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.771977 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-utilities\") pod \"certified-operators-xnf9s\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.789667 4760 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-md2tx\" (UniqueName: \"kubernetes.io/projected/fbad2b6d-0634-4bf4-8217-37985bba54fb-kube-api-access-md2tx\") pod \"certified-operators-xnf9s\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:44 crc kubenswrapper[4760]: I1124 18:02:44.877562 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:45 crc kubenswrapper[4760]: I1124 18:02:45.441639 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xnf9s"] Nov 24 18:02:45 crc kubenswrapper[4760]: W1124 18:02:45.454062 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfbad2b6d_0634_4bf4_8217_37985bba54fb.slice/crio-d76d504e0f49533478a4351fc0833e449afb1ea5d1568857ada42040e5eb38f7 WatchSource:0}: Error finding container d76d504e0f49533478a4351fc0833e449afb1ea5d1568857ada42040e5eb38f7: Status 404 returned error can't find the container with id d76d504e0f49533478a4351fc0833e449afb1ea5d1568857ada42040e5eb38f7 Nov 24 18:02:46 crc kubenswrapper[4760]: I1124 18:02:46.019785 4760 generic.go:334] "Generic (PLEG): container finished" podID="fbad2b6d-0634-4bf4-8217-37985bba54fb" containerID="36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b" exitCode=0 Nov 24 18:02:46 crc kubenswrapper[4760]: I1124 18:02:46.019860 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnf9s" event={"ID":"fbad2b6d-0634-4bf4-8217-37985bba54fb","Type":"ContainerDied","Data":"36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b"} Nov 24 18:02:46 crc kubenswrapper[4760]: I1124 18:02:46.020106 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnf9s" event={"ID":"fbad2b6d-0634-4bf4-8217-37985bba54fb","Type":"ContainerStarted","Data":"d76d504e0f49533478a4351fc0833e449afb1ea5d1568857ada42040e5eb38f7"} Nov 24 18:02:47 crc kubenswrapper[4760]: I1124 18:02:47.031675 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnf9s" event={"ID":"fbad2b6d-0634-4bf4-8217-37985bba54fb","Type":"ContainerStarted","Data":"c31fd71e3c2ad87d70ae413a3437a8f336fe1f70d649ace1914b3f05647609c1"} Nov 24 18:02:48 crc kubenswrapper[4760]: I1124 18:02:48.042953 4760 generic.go:334] "Generic (PLEG): container finished" podID="fbad2b6d-0634-4bf4-8217-37985bba54fb" containerID="c31fd71e3c2ad87d70ae413a3437a8f336fe1f70d649ace1914b3f05647609c1" exitCode=0 Nov 24 18:02:48 crc kubenswrapper[4760]: I1124 18:02:48.043062 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnf9s" event={"ID":"fbad2b6d-0634-4bf4-8217-37985bba54fb","Type":"ContainerDied","Data":"c31fd71e3c2ad87d70ae413a3437a8f336fe1f70d649ace1914b3f05647609c1"} Nov 24 18:02:50 crc kubenswrapper[4760]: I1124 18:02:50.068087 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnf9s" event={"ID":"fbad2b6d-0634-4bf4-8217-37985bba54fb","Type":"ContainerStarted","Data":"0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5"} Nov 24 18:02:50 crc kubenswrapper[4760]: I1124 18:02:50.085961 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xnf9s" 
podStartSLOduration=2.626419501 podStartE2EDuration="6.085938914s" podCreationTimestamp="2025-11-24 18:02:44 +0000 UTC" firstStartedPulling="2025-11-24 18:02:46.022729593 +0000 UTC m=+3561.345611143" lastFinishedPulling="2025-11-24 18:02:49.482249006 +0000 UTC m=+3564.805130556" observedRunningTime="2025-11-24 18:02:50.082493016 +0000 UTC m=+3565.405374576" watchObservedRunningTime="2025-11-24 18:02:50.085938914 +0000 UTC m=+3565.408820464" Nov 24 18:02:54 crc kubenswrapper[4760]: I1124 18:02:54.877838 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:54 crc kubenswrapper[4760]: I1124 18:02:54.879686 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:54 crc kubenswrapper[4760]: I1124 18:02:54.936864 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:55 crc kubenswrapper[4760]: I1124 18:02:55.169487 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:55 crc kubenswrapper[4760]: I1124 18:02:55.216030 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xnf9s"] Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.131475 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xnf9s" podUID="fbad2b6d-0634-4bf4-8217-37985bba54fb" containerName="registry-server" containerID="cri-o://0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5" gracePeriod=2 Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.621105 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.726246 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-utilities\") pod \"fbad2b6d-0634-4bf4-8217-37985bba54fb\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.726362 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-catalog-content\") pod \"fbad2b6d-0634-4bf4-8217-37985bba54fb\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.726488 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-md2tx\" (UniqueName: \"kubernetes.io/projected/fbad2b6d-0634-4bf4-8217-37985bba54fb-kube-api-access-md2tx\") pod \"fbad2b6d-0634-4bf4-8217-37985bba54fb\" (UID: \"fbad2b6d-0634-4bf4-8217-37985bba54fb\") " Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.727323 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-utilities" (OuterVolumeSpecName: "utilities") pod "fbad2b6d-0634-4bf4-8217-37985bba54fb" (UID: "fbad2b6d-0634-4bf4-8217-37985bba54fb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.732035 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbad2b6d-0634-4bf4-8217-37985bba54fb-kube-api-access-md2tx" (OuterVolumeSpecName: "kube-api-access-md2tx") pod "fbad2b6d-0634-4bf4-8217-37985bba54fb" (UID: "fbad2b6d-0634-4bf4-8217-37985bba54fb"). InnerVolumeSpecName "kube-api-access-md2tx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.779803 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fbad2b6d-0634-4bf4-8217-37985bba54fb" (UID: "fbad2b6d-0634-4bf4-8217-37985bba54fb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.828761 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-md2tx\" (UniqueName: \"kubernetes.io/projected/fbad2b6d-0634-4bf4-8217-37985bba54fb-kube-api-access-md2tx\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.828798 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:57 crc kubenswrapper[4760]: I1124 18:02:57.828810 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbad2b6d-0634-4bf4-8217-37985bba54fb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.153682 4760 generic.go:334] "Generic (PLEG): container finished" podID="fbad2b6d-0634-4bf4-8217-37985bba54fb" containerID="0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5" exitCode=0 Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.153743 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xnf9s" Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.153750 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnf9s" event={"ID":"fbad2b6d-0634-4bf4-8217-37985bba54fb","Type":"ContainerDied","Data":"0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5"} Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.153821 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnf9s" event={"ID":"fbad2b6d-0634-4bf4-8217-37985bba54fb","Type":"ContainerDied","Data":"d76d504e0f49533478a4351fc0833e449afb1ea5d1568857ada42040e5eb38f7"} Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.153884 4760 scope.go:117] "RemoveContainer" containerID="0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5" Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.186261 4760 scope.go:117] "RemoveContainer" containerID="c31fd71e3c2ad87d70ae413a3437a8f336fe1f70d649ace1914b3f05647609c1" Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.188893 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xnf9s"] Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.199110 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xnf9s"] Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.222065 4760 scope.go:117] "RemoveContainer" containerID="36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b" Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.256934 4760 scope.go:117] "RemoveContainer" containerID="0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5" Nov 24 18:02:58 crc kubenswrapper[4760]: E1124 18:02:58.257432 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5\": container with ID starting with 0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5 not found: ID does not exist" containerID="0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5" Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.257520 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5"} err="failed to get container status \"0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5\": rpc error: code = NotFound desc = could not find container \"0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5\": container with ID starting with 0f9f439aeb38b902e4e82849db610310845da6e925fd3aaf3250de436a50c2e5 not found: ID does not exist" Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.257552 4760 scope.go:117] "RemoveContainer" containerID="c31fd71e3c2ad87d70ae413a3437a8f336fe1f70d649ace1914b3f05647609c1" Nov 24 18:02:58 crc kubenswrapper[4760]: E1124 18:02:58.257880 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c31fd71e3c2ad87d70ae413a3437a8f336fe1f70d649ace1914b3f05647609c1\": container with ID starting with c31fd71e3c2ad87d70ae413a3437a8f336fe1f70d649ace1914b3f05647609c1 not found: ID does not exist" containerID="c31fd71e3c2ad87d70ae413a3437a8f336fe1f70d649ace1914b3f05647609c1" Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.257932 4760 
Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.257966 4760 scope.go:117] "RemoveContainer" containerID="36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b"
Nov 24 18:02:58 crc kubenswrapper[4760]: E1124 18:02:58.259468 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b\": container with ID starting with 36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b not found: ID does not exist" containerID="36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b"
Nov 24 18:02:58 crc kubenswrapper[4760]: I1124 18:02:58.259498 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b"} err="failed to get container status \"36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b\": rpc error: code = NotFound desc = could not find container \"36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b\": container with ID starting with 36d2b41bbf279f2de88deae2c891d0331fee32e8093355f7049a6c7a42a4bb5b not found: ID does not exist"
Nov 24 18:02:59 crc kubenswrapper[4760]: I1124 18:02:59.478265 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbad2b6d-0634-4bf4-8217-37985bba54fb" path="/var/lib/kubelet/pods/fbad2b6d-0634-4bf4-8217-37985bba54fb/volumes"
Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.086542 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mzkn6"]
Nov 24 18:03:27 crc kubenswrapper[4760]: E1124 18:03:27.088127 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbad2b6d-0634-4bf4-8217-37985bba54fb" containerName="extract-utilities"
Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.088147 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbad2b6d-0634-4bf4-8217-37985bba54fb" containerName="extract-utilities"
Nov 24 18:03:27 crc kubenswrapper[4760]: E1124 18:03:27.088177 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbad2b6d-0634-4bf4-8217-37985bba54fb" containerName="registry-server"
Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.088183 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbad2b6d-0634-4bf4-8217-37985bba54fb" containerName="registry-server"
Nov 24 18:03:27 crc kubenswrapper[4760]: E1124 18:03:27.088222 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbad2b6d-0634-4bf4-8217-37985bba54fb" containerName="extract-content"
Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.088228 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbad2b6d-0634-4bf4-8217-37985bba54fb" containerName="extract-content"
Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.088489 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbad2b6d-0634-4bf4-8217-37985bba54fb"
containerName="registry-server" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.090261 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.096683 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mzkn6"] Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.226619 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-catalog-content\") pod \"redhat-marketplace-mzkn6\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.226872 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlvkj\" (UniqueName: \"kubernetes.io/projected/1539cf2e-21d9-4f92-8d8b-cd6591854594-kube-api-access-vlvkj\") pod \"redhat-marketplace-mzkn6\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.226946 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-utilities\") pod \"redhat-marketplace-mzkn6\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.330219 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlvkj\" (UniqueName: \"kubernetes.io/projected/1539cf2e-21d9-4f92-8d8b-cd6591854594-kube-api-access-vlvkj\") pod \"redhat-marketplace-mzkn6\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.330299 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-utilities\") pod \"redhat-marketplace-mzkn6\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.330388 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-catalog-content\") pod \"redhat-marketplace-mzkn6\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.331161 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-catalog-content\") pod \"redhat-marketplace-mzkn6\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.331154 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-utilities\") pod \"redhat-marketplace-mzkn6\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " 
pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.360576 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlvkj\" (UniqueName: \"kubernetes.io/projected/1539cf2e-21d9-4f92-8d8b-cd6591854594-kube-api-access-vlvkj\") pod \"redhat-marketplace-mzkn6\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.449377 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:27 crc kubenswrapper[4760]: I1124 18:03:27.898148 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mzkn6"] Nov 24 18:03:28 crc kubenswrapper[4760]: I1124 18:03:28.466729 4760 generic.go:334] "Generic (PLEG): container finished" podID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerID="6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8" exitCode=0 Nov 24 18:03:28 crc kubenswrapper[4760]: I1124 18:03:28.467066 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzkn6" event={"ID":"1539cf2e-21d9-4f92-8d8b-cd6591854594","Type":"ContainerDied","Data":"6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8"} Nov 24 18:03:28 crc kubenswrapper[4760]: I1124 18:03:28.468877 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 18:03:28 crc kubenswrapper[4760]: I1124 18:03:28.469642 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzkn6" event={"ID":"1539cf2e-21d9-4f92-8d8b-cd6591854594","Type":"ContainerStarted","Data":"29c242209da3eac4dd5820f45101b22510d568ee5436c74d149450d0e0e16037"} Nov 24 18:03:29 crc kubenswrapper[4760]: I1124 18:03:29.506292 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzkn6" event={"ID":"1539cf2e-21d9-4f92-8d8b-cd6591854594","Type":"ContainerStarted","Data":"0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0"} Nov 24 18:03:30 crc kubenswrapper[4760]: I1124 18:03:30.520289 4760 generic.go:334] "Generic (PLEG): container finished" podID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerID="0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0" exitCode=0 Nov 24 18:03:30 crc kubenswrapper[4760]: I1124 18:03:30.520333 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzkn6" event={"ID":"1539cf2e-21d9-4f92-8d8b-cd6591854594","Type":"ContainerDied","Data":"0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0"} Nov 24 18:03:31 crc kubenswrapper[4760]: I1124 18:03:31.529918 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzkn6" event={"ID":"1539cf2e-21d9-4f92-8d8b-cd6591854594","Type":"ContainerStarted","Data":"94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400"} Nov 24 18:03:31 crc kubenswrapper[4760]: I1124 18:03:31.550251 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mzkn6" podStartSLOduration=2.053870558 podStartE2EDuration="4.550234209s" podCreationTimestamp="2025-11-24 18:03:27 +0000 UTC" firstStartedPulling="2025-11-24 18:03:28.468532504 +0000 UTC m=+3603.791414074" lastFinishedPulling="2025-11-24 18:03:30.964896175 +0000 UTC 
m=+3606.287777725" observedRunningTime="2025-11-24 18:03:31.547320636 +0000 UTC m=+3606.870202196" watchObservedRunningTime="2025-11-24 18:03:31.550234209 +0000 UTC m=+3606.873115759" Nov 24 18:03:35 crc kubenswrapper[4760]: I1124 18:03:35.643155 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:03:35 crc kubenswrapper[4760]: I1124 18:03:35.643741 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:03:37 crc kubenswrapper[4760]: I1124 18:03:37.449685 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:37 crc kubenswrapper[4760]: I1124 18:03:37.451302 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:37 crc kubenswrapper[4760]: I1124 18:03:37.508197 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:37 crc kubenswrapper[4760]: I1124 18:03:37.641394 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:37 crc kubenswrapper[4760]: I1124 18:03:37.747500 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mzkn6"] Nov 24 18:03:39 crc kubenswrapper[4760]: I1124 18:03:39.609125 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mzkn6" podUID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerName="registry-server" containerID="cri-o://94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400" gracePeriod=2 Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.117949 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.215897 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-utilities\") pod \"1539cf2e-21d9-4f92-8d8b-cd6591854594\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.216400 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-catalog-content\") pod \"1539cf2e-21d9-4f92-8d8b-cd6591854594\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.216819 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlvkj\" (UniqueName: \"kubernetes.io/projected/1539cf2e-21d9-4f92-8d8b-cd6591854594-kube-api-access-vlvkj\") pod \"1539cf2e-21d9-4f92-8d8b-cd6591854594\" (UID: \"1539cf2e-21d9-4f92-8d8b-cd6591854594\") " Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.217069 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-utilities" (OuterVolumeSpecName: "utilities") pod "1539cf2e-21d9-4f92-8d8b-cd6591854594" (UID: "1539cf2e-21d9-4f92-8d8b-cd6591854594"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.217305 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.221896 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1539cf2e-21d9-4f92-8d8b-cd6591854594-kube-api-access-vlvkj" (OuterVolumeSpecName: "kube-api-access-vlvkj") pod "1539cf2e-21d9-4f92-8d8b-cd6591854594" (UID: "1539cf2e-21d9-4f92-8d8b-cd6591854594"). InnerVolumeSpecName "kube-api-access-vlvkj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.238501 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1539cf2e-21d9-4f92-8d8b-cd6591854594" (UID: "1539cf2e-21d9-4f92-8d8b-cd6591854594"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.319085 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlvkj\" (UniqueName: \"kubernetes.io/projected/1539cf2e-21d9-4f92-8d8b-cd6591854594-kube-api-access-vlvkj\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.319175 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1539cf2e-21d9-4f92-8d8b-cd6591854594-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.629622 4760 generic.go:334] "Generic (PLEG): container finished" podID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerID="94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400" exitCode=0 Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.629665 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzkn6" event={"ID":"1539cf2e-21d9-4f92-8d8b-cd6591854594","Type":"ContainerDied","Data":"94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400"} Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.629690 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzkn6" event={"ID":"1539cf2e-21d9-4f92-8d8b-cd6591854594","Type":"ContainerDied","Data":"29c242209da3eac4dd5820f45101b22510d568ee5436c74d149450d0e0e16037"} Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.629707 4760 scope.go:117] "RemoveContainer" containerID="94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.629835 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mzkn6" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.650086 4760 scope.go:117] "RemoveContainer" containerID="0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.665407 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mzkn6"] Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.673220 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mzkn6"] Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.683553 4760 scope.go:117] "RemoveContainer" containerID="6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.726065 4760 scope.go:117] "RemoveContainer" containerID="94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400" Nov 24 18:03:40 crc kubenswrapper[4760]: E1124 18:03:40.726660 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400\": container with ID starting with 94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400 not found: ID does not exist" containerID="94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.726693 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400"} err="failed to get container status \"94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400\": rpc error: code = NotFound desc = could not find container \"94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400\": container with ID starting with 94990fa10a9524e240f89e8d839d71276face1c7bae8a205c8b300b906942400 not found: ID does not exist" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.726714 4760 scope.go:117] "RemoveContainer" containerID="0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0" Nov 24 18:03:40 crc kubenswrapper[4760]: E1124 18:03:40.726965 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0\": container with ID starting with 0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0 not found: ID does not exist" containerID="0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.726988 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0"} err="failed to get container status \"0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0\": rpc error: code = NotFound desc = could not find container \"0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0\": container with ID starting with 0523f1732473aaa139ad8e46bf213a4d19100b3948f4c1164ec5dadd70cdfbd0 not found: ID does not exist" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.727014 4760 scope.go:117] "RemoveContainer" containerID="6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8" Nov 24 18:03:40 crc kubenswrapper[4760]: E1124 18:03:40.727263 4760 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8\": container with ID starting with 6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8 not found: ID does not exist" containerID="6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8" Nov 24 18:03:40 crc kubenswrapper[4760]: I1124 18:03:40.727282 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8"} err="failed to get container status \"6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8\": rpc error: code = NotFound desc = could not find container \"6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8\": container with ID starting with 6b7780fec1b029d96e01a9c4a3e2c00552153bb027ce5fd68951f0a2509ce3d8 not found: ID does not exist" Nov 24 18:03:41 crc kubenswrapper[4760]: I1124 18:03:41.477292 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1539cf2e-21d9-4f92-8d8b-cd6591854594" path="/var/lib/kubelet/pods/1539cf2e-21d9-4f92-8d8b-cd6591854594/volumes" Nov 24 18:03:44 crc kubenswrapper[4760]: I1124 18:03:44.664901 4760 generic.go:334] "Generic (PLEG): container finished" podID="01a25ea0-debf-4dbb-89a7-75087fa4d098" containerID="c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe" exitCode=0 Nov 24 18:03:44 crc kubenswrapper[4760]: I1124 18:03:44.665069 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nz6g9/must-gather-7dx5g" event={"ID":"01a25ea0-debf-4dbb-89a7-75087fa4d098","Type":"ContainerDied","Data":"c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe"} Nov 24 18:03:44 crc kubenswrapper[4760]: I1124 18:03:44.666405 4760 scope.go:117] "RemoveContainer" containerID="c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe" Nov 24 18:03:45 crc kubenswrapper[4760]: I1124 18:03:45.680166 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-nz6g9_must-gather-7dx5g_01a25ea0-debf-4dbb-89a7-75087fa4d098/gather/0.log" Nov 24 18:03:52 crc kubenswrapper[4760]: I1124 18:03:52.872069 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-nz6g9/must-gather-7dx5g"] Nov 24 18:03:52 crc kubenswrapper[4760]: I1124 18:03:52.872919 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-nz6g9/must-gather-7dx5g" podUID="01a25ea0-debf-4dbb-89a7-75087fa4d098" containerName="copy" containerID="cri-o://aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995" gracePeriod=2 Nov 24 18:03:52 crc kubenswrapper[4760]: I1124 18:03:52.879802 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-nz6g9/must-gather-7dx5g"] Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.298238 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-nz6g9_must-gather-7dx5g_01a25ea0-debf-4dbb-89a7-75087fa4d098/copy/0.log" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.299154 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nz6g9/must-gather-7dx5g" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.459394 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ch8m8\" (UniqueName: \"kubernetes.io/projected/01a25ea0-debf-4dbb-89a7-75087fa4d098-kube-api-access-ch8m8\") pod \"01a25ea0-debf-4dbb-89a7-75087fa4d098\" (UID: \"01a25ea0-debf-4dbb-89a7-75087fa4d098\") " Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.459670 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/01a25ea0-debf-4dbb-89a7-75087fa4d098-must-gather-output\") pod \"01a25ea0-debf-4dbb-89a7-75087fa4d098\" (UID: \"01a25ea0-debf-4dbb-89a7-75087fa4d098\") " Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.466207 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01a25ea0-debf-4dbb-89a7-75087fa4d098-kube-api-access-ch8m8" (OuterVolumeSpecName: "kube-api-access-ch8m8") pod "01a25ea0-debf-4dbb-89a7-75087fa4d098" (UID: "01a25ea0-debf-4dbb-89a7-75087fa4d098"). InnerVolumeSpecName "kube-api-access-ch8m8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.562653 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ch8m8\" (UniqueName: \"kubernetes.io/projected/01a25ea0-debf-4dbb-89a7-75087fa4d098-kube-api-access-ch8m8\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.583243 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01a25ea0-debf-4dbb-89a7-75087fa4d098-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "01a25ea0-debf-4dbb-89a7-75087fa4d098" (UID: "01a25ea0-debf-4dbb-89a7-75087fa4d098"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.664564 4760 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/01a25ea0-debf-4dbb-89a7-75087fa4d098-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.743502 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-nz6g9_must-gather-7dx5g_01a25ea0-debf-4dbb-89a7-75087fa4d098/copy/0.log" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.743973 4760 generic.go:334] "Generic (PLEG): container finished" podID="01a25ea0-debf-4dbb-89a7-75087fa4d098" containerID="aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995" exitCode=143 Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.744024 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nz6g9/must-gather-7dx5g" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.744090 4760 scope.go:117] "RemoveContainer" containerID="aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.765787 4760 scope.go:117] "RemoveContainer" containerID="c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.825641 4760 scope.go:117] "RemoveContainer" containerID="aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995" Nov 24 18:03:53 crc kubenswrapper[4760]: E1124 18:03:53.826243 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995\": container with ID starting with aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995 not found: ID does not exist" containerID="aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.826273 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995"} err="failed to get container status \"aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995\": rpc error: code = NotFound desc = could not find container \"aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995\": container with ID starting with aec3e7657ae6a067b78dcd4f9c096515be44c1b7968a46100f49a4aac77e7995 not found: ID does not exist" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.826296 4760 scope.go:117] "RemoveContainer" containerID="c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe" Nov 24 18:03:53 crc kubenswrapper[4760]: E1124 18:03:53.826569 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe\": container with ID starting with c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe not found: ID does not exist" containerID="c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe" Nov 24 18:03:53 crc kubenswrapper[4760]: I1124 18:03:53.826597 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe"} err="failed to get container status \"c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe\": rpc error: code = NotFound desc = could not find container \"c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe\": container with ID starting with c6ff6e7368effb0a964ebbfa0d5b5ba9d534ccfeb273c7f7f0ae84c230039dfe not found: ID does not exist" Nov 24 18:03:55 crc kubenswrapper[4760]: I1124 18:03:55.476472 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01a25ea0-debf-4dbb-89a7-75087fa4d098" path="/var/lib/kubelet/pods/01a25ea0-debf-4dbb-89a7-75087fa4d098/volumes" Nov 24 18:04:05 crc kubenswrapper[4760]: I1124 18:04:05.642390 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:04:05 crc kubenswrapper[4760]: 
I1124 18:04:05.642930 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:04:35 crc kubenswrapper[4760]: I1124 18:04:35.642744 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:04:35 crc kubenswrapper[4760]: I1124 18:04:35.643782 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:04:35 crc kubenswrapper[4760]: I1124 18:04:35.643860 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 18:04:35 crc kubenswrapper[4760]: I1124 18:04:35.645797 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 18:04:35 crc kubenswrapper[4760]: I1124 18:04:35.645992 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" gracePeriod=600 Nov 24 18:04:35 crc kubenswrapper[4760]: E1124 18:04:35.779555 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:04:36 crc kubenswrapper[4760]: I1124 18:04:36.110827 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" exitCode=0 Nov 24 18:04:36 crc kubenswrapper[4760]: I1124 18:04:36.110892 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b"} Nov 24 18:04:36 crc kubenswrapper[4760]: I1124 18:04:36.110929 4760 scope.go:117] "RemoveContainer" containerID="068cd700a48609c485da1623d466fe38a9ca4d92ad19e40e3c85a4e6d8a2c01b" Nov 24 18:04:36 crc kubenswrapper[4760]: I1124 18:04:36.111723 4760 scope.go:117] "RemoveContainer" 
containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:04:36 crc kubenswrapper[4760]: E1124 18:04:36.112180 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:04:48 crc kubenswrapper[4760]: I1124 18:04:48.466647 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:04:48 crc kubenswrapper[4760]: E1124 18:04:48.467600 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:04:59 crc kubenswrapper[4760]: I1124 18:04:59.467456 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:04:59 crc kubenswrapper[4760]: E1124 18:04:59.468791 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:05:12 crc kubenswrapper[4760]: I1124 18:05:12.467876 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:05:12 crc kubenswrapper[4760]: E1124 18:05:12.468892 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:05:25 crc kubenswrapper[4760]: I1124 18:05:25.479285 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:05:25 crc kubenswrapper[4760]: E1124 18:05:25.480513 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:05:34 crc kubenswrapper[4760]: I1124 18:05:34.235295 4760 scope.go:117] "RemoveContainer" containerID="8d9a9ee11b1767c9623d7c4568db65ba4dc3094c6db5b203ed6908881eb29d03" Nov 24 18:05:39 crc kubenswrapper[4760]: I1124 18:05:39.467242 4760 scope.go:117] "RemoveContainer" 
containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:05:39 crc kubenswrapper[4760]: E1124 18:05:39.468498 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.880822 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v6xxb"] Nov 24 18:05:52 crc kubenswrapper[4760]: E1124 18:05:52.881783 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerName="registry-server" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.881797 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerName="registry-server" Nov 24 18:05:52 crc kubenswrapper[4760]: E1124 18:05:52.881813 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerName="extract-content" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.881820 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerName="extract-content" Nov 24 18:05:52 crc kubenswrapper[4760]: E1124 18:05:52.881843 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerName="extract-utilities" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.881849 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerName="extract-utilities" Nov 24 18:05:52 crc kubenswrapper[4760]: E1124 18:05:52.881864 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01a25ea0-debf-4dbb-89a7-75087fa4d098" containerName="gather" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.881869 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="01a25ea0-debf-4dbb-89a7-75087fa4d098" containerName="gather" Nov 24 18:05:52 crc kubenswrapper[4760]: E1124 18:05:52.881881 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01a25ea0-debf-4dbb-89a7-75087fa4d098" containerName="copy" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.881886 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="01a25ea0-debf-4dbb-89a7-75087fa4d098" containerName="copy" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.882080 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="1539cf2e-21d9-4f92-8d8b-cd6591854594" containerName="registry-server" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.882091 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="01a25ea0-debf-4dbb-89a7-75087fa4d098" containerName="copy" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.882100 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="01a25ea0-debf-4dbb-89a7-75087fa4d098" containerName="gather" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.883372 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.901176 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v6xxb"] Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.924859 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-utilities\") pod \"redhat-operators-v6xxb\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.924964 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-catalog-content\") pod \"redhat-operators-v6xxb\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:52 crc kubenswrapper[4760]: I1124 18:05:52.925020 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rv6vs\" (UniqueName: \"kubernetes.io/projected/5bd75bd5-b636-4ec8-839a-679ce21581d9-kube-api-access-rv6vs\") pod \"redhat-operators-v6xxb\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.027227 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rv6vs\" (UniqueName: \"kubernetes.io/projected/5bd75bd5-b636-4ec8-839a-679ce21581d9-kube-api-access-rv6vs\") pod \"redhat-operators-v6xxb\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.027342 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-utilities\") pod \"redhat-operators-v6xxb\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.027447 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-catalog-content\") pod \"redhat-operators-v6xxb\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.028045 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-catalog-content\") pod \"redhat-operators-v6xxb\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.028423 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-utilities\") pod \"redhat-operators-v6xxb\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.056777 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-rv6vs\" (UniqueName: \"kubernetes.io/projected/5bd75bd5-b636-4ec8-839a-679ce21581d9-kube-api-access-rv6vs\") pod \"redhat-operators-v6xxb\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.210421 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.655442 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v6xxb"] Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.945163 4760 generic.go:334] "Generic (PLEG): container finished" podID="5bd75bd5-b636-4ec8-839a-679ce21581d9" containerID="f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb" exitCode=0 Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.945245 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xxb" event={"ID":"5bd75bd5-b636-4ec8-839a-679ce21581d9","Type":"ContainerDied","Data":"f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb"} Nov 24 18:05:53 crc kubenswrapper[4760]: I1124 18:05:53.945438 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xxb" event={"ID":"5bd75bd5-b636-4ec8-839a-679ce21581d9","Type":"ContainerStarted","Data":"223b0bc905bfd880ea4c9a7a56815f69c82af88e7a1762dd30e5a84817227271"} Nov 24 18:05:54 crc kubenswrapper[4760]: I1124 18:05:54.466727 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:05:54 crc kubenswrapper[4760]: E1124 18:05:54.466934 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:05:55 crc kubenswrapper[4760]: I1124 18:05:55.966928 4760 generic.go:334] "Generic (PLEG): container finished" podID="5bd75bd5-b636-4ec8-839a-679ce21581d9" containerID="5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5" exitCode=0 Nov 24 18:05:55 crc kubenswrapper[4760]: I1124 18:05:55.967055 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xxb" event={"ID":"5bd75bd5-b636-4ec8-839a-679ce21581d9","Type":"ContainerDied","Data":"5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5"} Nov 24 18:05:56 crc kubenswrapper[4760]: I1124 18:05:56.983431 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xxb" event={"ID":"5bd75bd5-b636-4ec8-839a-679ce21581d9","Type":"ContainerStarted","Data":"f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66"} Nov 24 18:05:57 crc kubenswrapper[4760]: I1124 18:05:57.006545 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v6xxb" podStartSLOduration=2.457016156 podStartE2EDuration="5.006521195s" podCreationTimestamp="2025-11-24 18:05:52 +0000 UTC" firstStartedPulling="2025-11-24 18:05:53.94828447 +0000 UTC m=+3749.271166020" lastFinishedPulling="2025-11-24 18:05:56.497789499 +0000 UTC m=+3751.820671059" 
observedRunningTime="2025-11-24 18:05:57.004426715 +0000 UTC m=+3752.327308265" watchObservedRunningTime="2025-11-24 18:05:57.006521195 +0000 UTC m=+3752.329402755" Nov 24 18:06:03 crc kubenswrapper[4760]: I1124 18:06:03.210816 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:06:03 crc kubenswrapper[4760]: I1124 18:06:03.211436 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:06:03 crc kubenswrapper[4760]: I1124 18:06:03.277675 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:06:04 crc kubenswrapper[4760]: I1124 18:06:04.101657 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:06:04 crc kubenswrapper[4760]: I1124 18:06:04.156708 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v6xxb"] Nov 24 18:06:06 crc kubenswrapper[4760]: I1124 18:06:06.072712 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v6xxb" podUID="5bd75bd5-b636-4ec8-839a-679ce21581d9" containerName="registry-server" containerID="cri-o://f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66" gracePeriod=2 Nov 24 18:06:07 crc kubenswrapper[4760]: I1124 18:06:07.665952 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:06:07 crc kubenswrapper[4760]: I1124 18:06:07.680822 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-utilities\") pod \"5bd75bd5-b636-4ec8-839a-679ce21581d9\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " Nov 24 18:06:07 crc kubenswrapper[4760]: I1124 18:06:07.680870 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rv6vs\" (UniqueName: \"kubernetes.io/projected/5bd75bd5-b636-4ec8-839a-679ce21581d9-kube-api-access-rv6vs\") pod \"5bd75bd5-b636-4ec8-839a-679ce21581d9\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " Nov 24 18:06:07 crc kubenswrapper[4760]: I1124 18:06:07.682301 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-utilities" (OuterVolumeSpecName: "utilities") pod "5bd75bd5-b636-4ec8-839a-679ce21581d9" (UID: "5bd75bd5-b636-4ec8-839a-679ce21581d9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:06:07 crc kubenswrapper[4760]: I1124 18:06:07.684571 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-catalog-content\") pod \"5bd75bd5-b636-4ec8-839a-679ce21581d9\" (UID: \"5bd75bd5-b636-4ec8-839a-679ce21581d9\") " Nov 24 18:06:07 crc kubenswrapper[4760]: I1124 18:06:07.685544 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:07 crc kubenswrapper[4760]: I1124 18:06:07.689109 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bd75bd5-b636-4ec8-839a-679ce21581d9-kube-api-access-rv6vs" (OuterVolumeSpecName: "kube-api-access-rv6vs") pod "5bd75bd5-b636-4ec8-839a-679ce21581d9" (UID: "5bd75bd5-b636-4ec8-839a-679ce21581d9"). InnerVolumeSpecName "kube-api-access-rv6vs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:06:07 crc kubenswrapper[4760]: I1124 18:06:07.782239 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5bd75bd5-b636-4ec8-839a-679ce21581d9" (UID: "5bd75bd5-b636-4ec8-839a-679ce21581d9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:06:07 crc kubenswrapper[4760]: I1124 18:06:07.787970 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bd75bd5-b636-4ec8-839a-679ce21581d9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:07 crc kubenswrapper[4760]: I1124 18:06:07.788034 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rv6vs\" (UniqueName: \"kubernetes.io/projected/5bd75bd5-b636-4ec8-839a-679ce21581d9-kube-api-access-rv6vs\") on node \"crc\" DevicePath \"\"" Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.093407 4760 generic.go:334] "Generic (PLEG): container finished" podID="5bd75bd5-b636-4ec8-839a-679ce21581d9" containerID="f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66" exitCode=0 Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.093463 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xxb" event={"ID":"5bd75bd5-b636-4ec8-839a-679ce21581d9","Type":"ContainerDied","Data":"f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66"} Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.093500 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xxb" event={"ID":"5bd75bd5-b636-4ec8-839a-679ce21581d9","Type":"ContainerDied","Data":"223b0bc905bfd880ea4c9a7a56815f69c82af88e7a1762dd30e5a84817227271"} Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.093517 4760 scope.go:117] "RemoveContainer" containerID="f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66" Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.093549 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v6xxb" Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.128529 4760 scope.go:117] "RemoveContainer" containerID="5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5" Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.133233 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v6xxb"] Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.141050 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v6xxb"] Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.155374 4760 scope.go:117] "RemoveContainer" containerID="f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb" Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.197135 4760 scope.go:117] "RemoveContainer" containerID="f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66" Nov 24 18:06:08 crc kubenswrapper[4760]: E1124 18:06:08.198477 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66\": container with ID starting with f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66 not found: ID does not exist" containerID="f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66" Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.198599 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66"} err="failed to get container status \"f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66\": rpc error: code = NotFound desc = could not find container \"f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66\": container with ID starting with f08f5e0c4285d18a56e8018545d4164e0ebde0e2dd0284005c77974d8027eb66 not found: ID does not exist" Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.198631 4760 scope.go:117] "RemoveContainer" containerID="5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5" Nov 24 18:06:08 crc kubenswrapper[4760]: E1124 18:06:08.198903 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5\": container with ID starting with 5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5 not found: ID does not exist" containerID="5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5" Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.198928 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5"} err="failed to get container status \"5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5\": rpc error: code = NotFound desc = could not find container \"5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5\": container with ID starting with 5072b9bd2e2f21edee21dcf835b435ec82bf45e90849c48689e41d25a4af4fa5 not found: ID does not exist" Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.198941 4760 scope.go:117] "RemoveContainer" containerID="f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb" Nov 24 18:06:08 crc kubenswrapper[4760]: E1124 18:06:08.199154 4760 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb\": container with ID starting with f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb not found: ID does not exist" containerID="f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb" Nov 24 18:06:08 crc kubenswrapper[4760]: I1124 18:06:08.199174 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb"} err="failed to get container status \"f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb\": rpc error: code = NotFound desc = could not find container \"f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb\": container with ID starting with f39f427d7ca457bcc3a1581717009428d3228b1af16b12442c8c7733c5dddceb not found: ID does not exist" Nov 24 18:06:09 crc kubenswrapper[4760]: I1124 18:06:09.468107 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:06:09 crc kubenswrapper[4760]: E1124 18:06:09.468307 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:06:09 crc kubenswrapper[4760]: I1124 18:06:09.479911 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bd75bd5-b636-4ec8-839a-679ce21581d9" path="/var/lib/kubelet/pods/5bd75bd5-b636-4ec8-839a-679ce21581d9/volumes" Nov 24 18:06:23 crc kubenswrapper[4760]: I1124 18:06:23.466941 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:06:23 crc kubenswrapper[4760]: E1124 18:06:23.467927 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:06:34 crc kubenswrapper[4760]: I1124 18:06:34.466580 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:06:34 crc kubenswrapper[4760]: E1124 18:06:34.467352 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:06:43 crc kubenswrapper[4760]: I1124 18:06:43.902055 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-9p2gn/must-gather-5t4fq"] Nov 24 18:06:43 crc kubenswrapper[4760]: E1124 18:06:43.903096 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bd75bd5-b636-4ec8-839a-679ce21581d9" 
containerName="registry-server" Nov 24 18:06:43 crc kubenswrapper[4760]: I1124 18:06:43.903114 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bd75bd5-b636-4ec8-839a-679ce21581d9" containerName="registry-server" Nov 24 18:06:43 crc kubenswrapper[4760]: E1124 18:06:43.903162 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bd75bd5-b636-4ec8-839a-679ce21581d9" containerName="extract-content" Nov 24 18:06:43 crc kubenswrapper[4760]: I1124 18:06:43.903171 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bd75bd5-b636-4ec8-839a-679ce21581d9" containerName="extract-content" Nov 24 18:06:43 crc kubenswrapper[4760]: E1124 18:06:43.903193 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bd75bd5-b636-4ec8-839a-679ce21581d9" containerName="extract-utilities" Nov 24 18:06:43 crc kubenswrapper[4760]: I1124 18:06:43.903201 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bd75bd5-b636-4ec8-839a-679ce21581d9" containerName="extract-utilities" Nov 24 18:06:43 crc kubenswrapper[4760]: I1124 18:06:43.903413 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bd75bd5-b636-4ec8-839a-679ce21581d9" containerName="registry-server" Nov 24 18:06:43 crc kubenswrapper[4760]: I1124 18:06:43.904537 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-9p2gn/must-gather-5t4fq" Nov 24 18:06:43 crc kubenswrapper[4760]: I1124 18:06:43.907049 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-9p2gn"/"kube-root-ca.crt" Nov 24 18:06:43 crc kubenswrapper[4760]: I1124 18:06:43.907322 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-9p2gn"/"openshift-service-ca.crt" Nov 24 18:06:43 crc kubenswrapper[4760]: I1124 18:06:43.907440 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-9p2gn"/"default-dockercfg-pqjqz" Nov 24 18:06:43 crc kubenswrapper[4760]: I1124 18:06:43.922700 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-9p2gn/must-gather-5t4fq"] Nov 24 18:06:44 crc kubenswrapper[4760]: I1124 18:06:44.007022 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9f2vg\" (UniqueName: \"kubernetes.io/projected/a6ec6e50-7fdc-459f-96b3-ed244e7da412-kube-api-access-9f2vg\") pod \"must-gather-5t4fq\" (UID: \"a6ec6e50-7fdc-459f-96b3-ed244e7da412\") " pod="openshift-must-gather-9p2gn/must-gather-5t4fq" Nov 24 18:06:44 crc kubenswrapper[4760]: I1124 18:06:44.007116 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a6ec6e50-7fdc-459f-96b3-ed244e7da412-must-gather-output\") pod \"must-gather-5t4fq\" (UID: \"a6ec6e50-7fdc-459f-96b3-ed244e7da412\") " pod="openshift-must-gather-9p2gn/must-gather-5t4fq" Nov 24 18:06:44 crc kubenswrapper[4760]: I1124 18:06:44.109420 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9f2vg\" (UniqueName: \"kubernetes.io/projected/a6ec6e50-7fdc-459f-96b3-ed244e7da412-kube-api-access-9f2vg\") pod \"must-gather-5t4fq\" (UID: \"a6ec6e50-7fdc-459f-96b3-ed244e7da412\") " pod="openshift-must-gather-9p2gn/must-gather-5t4fq" Nov 24 18:06:44 crc kubenswrapper[4760]: I1124 18:06:44.109575 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: 
\"kubernetes.io/empty-dir/a6ec6e50-7fdc-459f-96b3-ed244e7da412-must-gather-output\") pod \"must-gather-5t4fq\" (UID: \"a6ec6e50-7fdc-459f-96b3-ed244e7da412\") " pod="openshift-must-gather-9p2gn/must-gather-5t4fq" Nov 24 18:06:44 crc kubenswrapper[4760]: I1124 18:06:44.110267 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a6ec6e50-7fdc-459f-96b3-ed244e7da412-must-gather-output\") pod \"must-gather-5t4fq\" (UID: \"a6ec6e50-7fdc-459f-96b3-ed244e7da412\") " pod="openshift-must-gather-9p2gn/must-gather-5t4fq" Nov 24 18:06:44 crc kubenswrapper[4760]: I1124 18:06:44.432738 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9f2vg\" (UniqueName: \"kubernetes.io/projected/a6ec6e50-7fdc-459f-96b3-ed244e7da412-kube-api-access-9f2vg\") pod \"must-gather-5t4fq\" (UID: \"a6ec6e50-7fdc-459f-96b3-ed244e7da412\") " pod="openshift-must-gather-9p2gn/must-gather-5t4fq" Nov 24 18:06:44 crc kubenswrapper[4760]: I1124 18:06:44.525256 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-9p2gn/must-gather-5t4fq" Nov 24 18:06:44 crc kubenswrapper[4760]: I1124 18:06:44.980443 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-9p2gn/must-gather-5t4fq"] Nov 24 18:06:45 crc kubenswrapper[4760]: I1124 18:06:45.479904 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/must-gather-5t4fq" event={"ID":"a6ec6e50-7fdc-459f-96b3-ed244e7da412","Type":"ContainerStarted","Data":"b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207"} Nov 24 18:06:45 crc kubenswrapper[4760]: I1124 18:06:45.480361 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/must-gather-5t4fq" event={"ID":"a6ec6e50-7fdc-459f-96b3-ed244e7da412","Type":"ContainerStarted","Data":"4ebaaa2816d9152c97faa2f1925cd1c8267fefef7216f6e5ebc844d0a5953b16"} Nov 24 18:06:46 crc kubenswrapper[4760]: I1124 18:06:46.490979 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/must-gather-5t4fq" event={"ID":"a6ec6e50-7fdc-459f-96b3-ed244e7da412","Type":"ContainerStarted","Data":"d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7"} Nov 24 18:06:46 crc kubenswrapper[4760]: I1124 18:06:46.517892 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-9p2gn/must-gather-5t4fq" podStartSLOduration=3.517873589 podStartE2EDuration="3.517873589s" podCreationTimestamp="2025-11-24 18:06:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:06:46.50916481 +0000 UTC m=+3801.832046370" watchObservedRunningTime="2025-11-24 18:06:46.517873589 +0000 UTC m=+3801.840755139" Nov 24 18:06:47 crc kubenswrapper[4760]: I1124 18:06:47.466113 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:06:47 crc kubenswrapper[4760]: E1124 18:06:47.466665 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" 
podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:06:48 crc kubenswrapper[4760]: I1124 18:06:48.896041 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-9p2gn/crc-debug-bfj52"] Nov 24 18:06:48 crc kubenswrapper[4760]: I1124 18:06:48.897663 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-bfj52" Nov 24 18:06:48 crc kubenswrapper[4760]: I1124 18:06:48.996397 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-host\") pod \"crc-debug-bfj52\" (UID: \"3e17d0b4-67e3-49b5-bfa4-d85ea473e734\") " pod="openshift-must-gather-9p2gn/crc-debug-bfj52" Nov 24 18:06:48 crc kubenswrapper[4760]: I1124 18:06:48.996463 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xx5xm\" (UniqueName: \"kubernetes.io/projected/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-kube-api-access-xx5xm\") pod \"crc-debug-bfj52\" (UID: \"3e17d0b4-67e3-49b5-bfa4-d85ea473e734\") " pod="openshift-must-gather-9p2gn/crc-debug-bfj52" Nov 24 18:06:49 crc kubenswrapper[4760]: I1124 18:06:49.097902 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-host\") pod \"crc-debug-bfj52\" (UID: \"3e17d0b4-67e3-49b5-bfa4-d85ea473e734\") " pod="openshift-must-gather-9p2gn/crc-debug-bfj52" Nov 24 18:06:49 crc kubenswrapper[4760]: I1124 18:06:49.097987 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xx5xm\" (UniqueName: \"kubernetes.io/projected/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-kube-api-access-xx5xm\") pod \"crc-debug-bfj52\" (UID: \"3e17d0b4-67e3-49b5-bfa4-d85ea473e734\") " pod="openshift-must-gather-9p2gn/crc-debug-bfj52" Nov 24 18:06:49 crc kubenswrapper[4760]: I1124 18:06:49.098150 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-host\") pod \"crc-debug-bfj52\" (UID: \"3e17d0b4-67e3-49b5-bfa4-d85ea473e734\") " pod="openshift-must-gather-9p2gn/crc-debug-bfj52" Nov 24 18:06:49 crc kubenswrapper[4760]: I1124 18:06:49.114534 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xx5xm\" (UniqueName: \"kubernetes.io/projected/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-kube-api-access-xx5xm\") pod \"crc-debug-bfj52\" (UID: \"3e17d0b4-67e3-49b5-bfa4-d85ea473e734\") " pod="openshift-must-gather-9p2gn/crc-debug-bfj52" Nov 24 18:06:49 crc kubenswrapper[4760]: I1124 18:06:49.218394 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-bfj52" Nov 24 18:06:49 crc kubenswrapper[4760]: W1124 18:06:49.256023 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e17d0b4_67e3_49b5_bfa4_d85ea473e734.slice/crio-bc0aa7361d9573ed83ad869aa8c3cb1e09ec4391888d3e665273062dfb242c9a WatchSource:0}: Error finding container bc0aa7361d9573ed83ad869aa8c3cb1e09ec4391888d3e665273062dfb242c9a: Status 404 returned error can't find the container with id bc0aa7361d9573ed83ad869aa8c3cb1e09ec4391888d3e665273062dfb242c9a Nov 24 18:06:49 crc kubenswrapper[4760]: I1124 18:06:49.518800 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/crc-debug-bfj52" event={"ID":"3e17d0b4-67e3-49b5-bfa4-d85ea473e734","Type":"ContainerStarted","Data":"3ac556edc40db3bc078a1837bb6969a4cce66c8a416098e5da282e16929b64ba"} Nov 24 18:06:49 crc kubenswrapper[4760]: I1124 18:06:49.519154 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/crc-debug-bfj52" event={"ID":"3e17d0b4-67e3-49b5-bfa4-d85ea473e734","Type":"ContainerStarted","Data":"bc0aa7361d9573ed83ad869aa8c3cb1e09ec4391888d3e665273062dfb242c9a"} Nov 24 18:06:49 crc kubenswrapper[4760]: I1124 18:06:49.536324 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-9p2gn/crc-debug-bfj52" podStartSLOduration=1.536306027 podStartE2EDuration="1.536306027s" podCreationTimestamp="2025-11-24 18:06:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:06:49.532492968 +0000 UTC m=+3804.855374518" watchObservedRunningTime="2025-11-24 18:06:49.536306027 +0000 UTC m=+3804.859187577" Nov 24 18:06:59 crc kubenswrapper[4760]: I1124 18:06:59.466186 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:06:59 crc kubenswrapper[4760]: E1124 18:06:59.466976 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:07:02 crc kubenswrapper[4760]: I1124 18:07:02.301270 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-69f4488969-xwpx8" podUID="37d3f873-9ed8-47d6-b62d-3b007dca3936" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 24 18:07:14 crc kubenswrapper[4760]: I1124 18:07:14.466323 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:07:14 crc kubenswrapper[4760]: E1124 18:07:14.467319 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:07:25 crc kubenswrapper[4760]: I1124 18:07:25.847077 4760 
generic.go:334] "Generic (PLEG): container finished" podID="3e17d0b4-67e3-49b5-bfa4-d85ea473e734" containerID="3ac556edc40db3bc078a1837bb6969a4cce66c8a416098e5da282e16929b64ba" exitCode=0 Nov 24 18:07:25 crc kubenswrapper[4760]: I1124 18:07:25.847131 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/crc-debug-bfj52" event={"ID":"3e17d0b4-67e3-49b5-bfa4-d85ea473e734","Type":"ContainerDied","Data":"3ac556edc40db3bc078a1837bb6969a4cce66c8a416098e5da282e16929b64ba"} Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:26.999753 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-bfj52" Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.043276 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-9p2gn/crc-debug-bfj52"] Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.053605 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-9p2gn/crc-debug-bfj52"] Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.175953 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xx5xm\" (UniqueName: \"kubernetes.io/projected/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-kube-api-access-xx5xm\") pod \"3e17d0b4-67e3-49b5-bfa4-d85ea473e734\" (UID: \"3e17d0b4-67e3-49b5-bfa4-d85ea473e734\") " Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.176271 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-host\") pod \"3e17d0b4-67e3-49b5-bfa4-d85ea473e734\" (UID: \"3e17d0b4-67e3-49b5-bfa4-d85ea473e734\") " Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.176848 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-host" (OuterVolumeSpecName: "host") pod "3e17d0b4-67e3-49b5-bfa4-d85ea473e734" (UID: "3e17d0b4-67e3-49b5-bfa4-d85ea473e734"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.182323 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-kube-api-access-xx5xm" (OuterVolumeSpecName: "kube-api-access-xx5xm") pod "3e17d0b4-67e3-49b5-bfa4-d85ea473e734" (UID: "3e17d0b4-67e3-49b5-bfa4-d85ea473e734"). InnerVolumeSpecName "kube-api-access-xx5xm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.278574 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xx5xm\" (UniqueName: \"kubernetes.io/projected/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-kube-api-access-xx5xm\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.278641 4760 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3e17d0b4-67e3-49b5-bfa4-d85ea473e734-host\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.480104 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e17d0b4-67e3-49b5-bfa4-d85ea473e734" path="/var/lib/kubelet/pods/3e17d0b4-67e3-49b5-bfa4-d85ea473e734/volumes" Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.871658 4760 scope.go:117] "RemoveContainer" containerID="3ac556edc40db3bc078a1837bb6969a4cce66c8a416098e5da282e16929b64ba" Nov 24 18:07:27 crc kubenswrapper[4760]: I1124 18:07:27.871875 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-bfj52" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.249890 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-9p2gn/crc-debug-nzbk4"] Nov 24 18:07:28 crc kubenswrapper[4760]: E1124 18:07:28.251831 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e17d0b4-67e3-49b5-bfa4-d85ea473e734" containerName="container-00" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.251969 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e17d0b4-67e3-49b5-bfa4-d85ea473e734" containerName="container-00" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.252448 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e17d0b4-67e3-49b5-bfa4-d85ea473e734" containerName="container-00" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.253303 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.410366 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-host\") pod \"crc-debug-nzbk4\" (UID: \"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8\") " pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.410655 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfvpr\" (UniqueName: \"kubernetes.io/projected/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-kube-api-access-lfvpr\") pod \"crc-debug-nzbk4\" (UID: \"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8\") " pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.512442 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-host\") pod \"crc-debug-nzbk4\" (UID: \"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8\") " pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.513110 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfvpr\" (UniqueName: \"kubernetes.io/projected/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-kube-api-access-lfvpr\") pod \"crc-debug-nzbk4\" (UID: \"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8\") " pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.512590 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-host\") pod \"crc-debug-nzbk4\" (UID: \"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8\") " pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.559037 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfvpr\" (UniqueName: \"kubernetes.io/projected/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-kube-api-access-lfvpr\") pod \"crc-debug-nzbk4\" (UID: \"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8\") " pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.574275 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" Nov 24 18:07:28 crc kubenswrapper[4760]: W1124 18:07:28.612417 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod73bcd44e_dcdb_4cef_aee4_73cfb3176ea8.slice/crio-be6fa424c20d4ce155ceea1568272a0d91fe25ce94e8dc3272723c0d476e11a8 WatchSource:0}: Error finding container be6fa424c20d4ce155ceea1568272a0d91fe25ce94e8dc3272723c0d476e11a8: Status 404 returned error can't find the container with id be6fa424c20d4ce155ceea1568272a0d91fe25ce94e8dc3272723c0d476e11a8 Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.904502 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" event={"ID":"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8","Type":"ContainerStarted","Data":"9a6783b30a7deedfd9cfb9f607bb52d83ad42013c842cceb963ca8098b01fdfb"} Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.905141 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" event={"ID":"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8","Type":"ContainerStarted","Data":"be6fa424c20d4ce155ceea1568272a0d91fe25ce94e8dc3272723c0d476e11a8"} Nov 24 18:07:28 crc kubenswrapper[4760]: I1124 18:07:28.940141 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" podStartSLOduration=0.940111536 podStartE2EDuration="940.111536ms" podCreationTimestamp="2025-11-24 18:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:07:28.922754541 +0000 UTC m=+3844.245636131" watchObservedRunningTime="2025-11-24 18:07:28.940111536 +0000 UTC m=+3844.262993126" Nov 24 18:07:29 crc kubenswrapper[4760]: I1124 18:07:29.467095 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:07:29 crc kubenswrapper[4760]: E1124 18:07:29.467468 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:07:29 crc kubenswrapper[4760]: I1124 18:07:29.918127 4760 generic.go:334] "Generic (PLEG): container finished" podID="73bcd44e-dcdb-4cef-aee4-73cfb3176ea8" containerID="9a6783b30a7deedfd9cfb9f607bb52d83ad42013c842cceb963ca8098b01fdfb" exitCode=0 Nov 24 18:07:29 crc kubenswrapper[4760]: I1124 18:07:29.918177 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" event={"ID":"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8","Type":"ContainerDied","Data":"9a6783b30a7deedfd9cfb9f607bb52d83ad42013c842cceb963ca8098b01fdfb"} Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.041440 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.077092 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-9p2gn/crc-debug-nzbk4"] Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.089861 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-9p2gn/crc-debug-nzbk4"] Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.164917 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfvpr\" (UniqueName: \"kubernetes.io/projected/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-kube-api-access-lfvpr\") pod \"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8\" (UID: \"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8\") " Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.164995 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-host\") pod \"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8\" (UID: \"73bcd44e-dcdb-4cef-aee4-73cfb3176ea8\") " Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.165063 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-host" (OuterVolumeSpecName: "host") pod "73bcd44e-dcdb-4cef-aee4-73cfb3176ea8" (UID: "73bcd44e-dcdb-4cef-aee4-73cfb3176ea8"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.165599 4760 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-host\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.171248 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-kube-api-access-lfvpr" (OuterVolumeSpecName: "kube-api-access-lfvpr") pod "73bcd44e-dcdb-4cef-aee4-73cfb3176ea8" (UID: "73bcd44e-dcdb-4cef-aee4-73cfb3176ea8"). InnerVolumeSpecName "kube-api-access-lfvpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.267572 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfvpr\" (UniqueName: \"kubernetes.io/projected/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8-kube-api-access-lfvpr\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.477764 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73bcd44e-dcdb-4cef-aee4-73cfb3176ea8" path="/var/lib/kubelet/pods/73bcd44e-dcdb-4cef-aee4-73cfb3176ea8/volumes" Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.941594 4760 scope.go:117] "RemoveContainer" containerID="9a6783b30a7deedfd9cfb9f607bb52d83ad42013c842cceb963ca8098b01fdfb" Nov 24 18:07:31 crc kubenswrapper[4760]: I1124 18:07:31.941640 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-nzbk4" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.231232 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-9p2gn/crc-debug-h9fqj"] Nov 24 18:07:32 crc kubenswrapper[4760]: E1124 18:07:32.231958 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73bcd44e-dcdb-4cef-aee4-73cfb3176ea8" containerName="container-00" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.231973 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="73bcd44e-dcdb-4cef-aee4-73cfb3176ea8" containerName="container-00" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.232391 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="73bcd44e-dcdb-4cef-aee4-73cfb3176ea8" containerName="container-00" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.233280 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.284493 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-host\") pod \"crc-debug-h9fqj\" (UID: \"c3dca026-5c42-4393-b0da-63d2fb7d8e5e\") " pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.284771 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc2hl\" (UniqueName: \"kubernetes.io/projected/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-kube-api-access-nc2hl\") pod \"crc-debug-h9fqj\" (UID: \"c3dca026-5c42-4393-b0da-63d2fb7d8e5e\") " pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.387331 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-host\") pod \"crc-debug-h9fqj\" (UID: \"c3dca026-5c42-4393-b0da-63d2fb7d8e5e\") " pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.387595 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc2hl\" (UniqueName: \"kubernetes.io/projected/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-kube-api-access-nc2hl\") pod \"crc-debug-h9fqj\" (UID: \"c3dca026-5c42-4393-b0da-63d2fb7d8e5e\") " pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.387470 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-host\") pod \"crc-debug-h9fqj\" (UID: \"c3dca026-5c42-4393-b0da-63d2fb7d8e5e\") " pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.417800 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc2hl\" (UniqueName: \"kubernetes.io/projected/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-kube-api-access-nc2hl\") pod \"crc-debug-h9fqj\" (UID: \"c3dca026-5c42-4393-b0da-63d2fb7d8e5e\") " pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.557066 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" Nov 24 18:07:32 crc kubenswrapper[4760]: W1124 18:07:32.599842 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3dca026_5c42_4393_b0da_63d2fb7d8e5e.slice/crio-46a96e9bbebcb45dd2295ef13490bfa8e02c61d56a67f86e90ec5398a20a0eb2 WatchSource:0}: Error finding container 46a96e9bbebcb45dd2295ef13490bfa8e02c61d56a67f86e90ec5398a20a0eb2: Status 404 returned error can't find the container with id 46a96e9bbebcb45dd2295ef13490bfa8e02c61d56a67f86e90ec5398a20a0eb2 Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.952533 4760 generic.go:334] "Generic (PLEG): container finished" podID="c3dca026-5c42-4393-b0da-63d2fb7d8e5e" containerID="60d7aab2be8a51d7e4a94ddbecb4e20730c91bdb36408dfb6783162357b5d3f7" exitCode=0 Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.952667 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" event={"ID":"c3dca026-5c42-4393-b0da-63d2fb7d8e5e","Type":"ContainerDied","Data":"60d7aab2be8a51d7e4a94ddbecb4e20730c91bdb36408dfb6783162357b5d3f7"} Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.952928 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" event={"ID":"c3dca026-5c42-4393-b0da-63d2fb7d8e5e","Type":"ContainerStarted","Data":"46a96e9bbebcb45dd2295ef13490bfa8e02c61d56a67f86e90ec5398a20a0eb2"} Nov 24 18:07:32 crc kubenswrapper[4760]: I1124 18:07:32.997210 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-9p2gn/crc-debug-h9fqj"] Nov 24 18:07:33 crc kubenswrapper[4760]: I1124 18:07:33.006569 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-9p2gn/crc-debug-h9fqj"] Nov 24 18:07:34 crc kubenswrapper[4760]: I1124 18:07:34.062731 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" Nov 24 18:07:34 crc kubenswrapper[4760]: I1124 18:07:34.219035 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-host\") pod \"c3dca026-5c42-4393-b0da-63d2fb7d8e5e\" (UID: \"c3dca026-5c42-4393-b0da-63d2fb7d8e5e\") " Nov 24 18:07:34 crc kubenswrapper[4760]: I1124 18:07:34.219150 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nc2hl\" (UniqueName: \"kubernetes.io/projected/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-kube-api-access-nc2hl\") pod \"c3dca026-5c42-4393-b0da-63d2fb7d8e5e\" (UID: \"c3dca026-5c42-4393-b0da-63d2fb7d8e5e\") " Nov 24 18:07:34 crc kubenswrapper[4760]: I1124 18:07:34.219144 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-host" (OuterVolumeSpecName: "host") pod "c3dca026-5c42-4393-b0da-63d2fb7d8e5e" (UID: "c3dca026-5c42-4393-b0da-63d2fb7d8e5e"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 18:07:34 crc kubenswrapper[4760]: I1124 18:07:34.231153 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-kube-api-access-nc2hl" (OuterVolumeSpecName: "kube-api-access-nc2hl") pod "c3dca026-5c42-4393-b0da-63d2fb7d8e5e" (UID: "c3dca026-5c42-4393-b0da-63d2fb7d8e5e"). 
InnerVolumeSpecName "kube-api-access-nc2hl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:07:34 crc kubenswrapper[4760]: I1124 18:07:34.322369 4760 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-host\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:34 crc kubenswrapper[4760]: I1124 18:07:34.322429 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nc2hl\" (UniqueName: \"kubernetes.io/projected/c3dca026-5c42-4393-b0da-63d2fb7d8e5e-kube-api-access-nc2hl\") on node \"crc\" DevicePath \"\"" Nov 24 18:07:34 crc kubenswrapper[4760]: I1124 18:07:34.980155 4760 scope.go:117] "RemoveContainer" containerID="60d7aab2be8a51d7e4a94ddbecb4e20730c91bdb36408dfb6783162357b5d3f7" Nov 24 18:07:34 crc kubenswrapper[4760]: I1124 18:07:34.980200 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-9p2gn/crc-debug-h9fqj" Nov 24 18:07:35 crc kubenswrapper[4760]: I1124 18:07:35.479893 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3dca026-5c42-4393-b0da-63d2fb7d8e5e" path="/var/lib/kubelet/pods/c3dca026-5c42-4393-b0da-63d2fb7d8e5e/volumes" Nov 24 18:07:40 crc kubenswrapper[4760]: I1124 18:07:40.465892 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:07:40 crc kubenswrapper[4760]: E1124 18:07:40.466705 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:07:52 crc kubenswrapper[4760]: I1124 18:07:52.449197 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6b95dd9bc6-5gb75_0ad3d643-7c73-4b15-966d-d4c7cb1d2438/barbican-api/0.log" Nov 24 18:07:52 crc kubenswrapper[4760]: I1124 18:07:52.466816 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:07:52 crc kubenswrapper[4760]: E1124 18:07:52.467178 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:07:52 crc kubenswrapper[4760]: I1124 18:07:52.610733 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6b95dd9bc6-5gb75_0ad3d643-7c73-4b15-966d-d4c7cb1d2438/barbican-api-log/0.log" Nov 24 18:07:52 crc kubenswrapper[4760]: I1124 18:07:52.687673 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6b6b7c6d54-mkcp9_bd4e39bc-5c35-4906-906a-a5558f2861de/barbican-keystone-listener/0.log" Nov 24 18:07:52 crc kubenswrapper[4760]: I1124 18:07:52.816288 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-keystone-listener-6b6b7c6d54-mkcp9_bd4e39bc-5c35-4906-906a-a5558f2861de/barbican-keystone-listener-log/0.log" Nov 24 18:07:52 crc kubenswrapper[4760]: I1124 18:07:52.881536 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bc9c99c9f-fs95m_4ce07bef-e13b-45e6-ad5e-b7372c3b1432/barbican-worker-log/0.log" Nov 24 18:07:52 crc kubenswrapper[4760]: I1124 18:07:52.901228 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bc9c99c9f-fs95m_4ce07bef-e13b-45e6-ad5e-b7372c3b1432/barbican-worker/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.083730 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-f4nsm_b163f1e6-048b-4722-bb36-4cd23619b927/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.107443 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0aa23f2e-1d46-4435-abc2-e019f2070509/ceilometer-central-agent/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.226976 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0aa23f2e-1d46-4435-abc2-e019f2070509/ceilometer-notification-agent/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.255133 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0aa23f2e-1d46-4435-abc2-e019f2070509/proxy-httpd/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.303947 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0aa23f2e-1d46-4435-abc2-e019f2070509/sg-core/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.429901 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_fd87b6ec-9f3d-41ae-9647-6410620a1f4a/cinder-api-log/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.476152 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_fd87b6ec-9f3d-41ae-9647-6410620a1f4a/cinder-api/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.644394 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4faf4c98-3f75-4b32-b35d-99e020a71f8c/probe/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.670556 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4faf4c98-3f75-4b32-b35d-99e020a71f8c/cinder-scheduler/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.745025 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-5kcqw_60a5bb95-2a7f-43be-a54f-be0872e8331b/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.876198 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-8mjkb_37d99b2c-138d-4470-9807-eec5191203a6/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:07:53 crc kubenswrapper[4760]: I1124 18:07:53.929819 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-55478c4467-9frwn_bb018af8-7779-4386-8903-a1dfb982a26e/init/0.log" Nov 24 18:07:54 crc kubenswrapper[4760]: I1124 18:07:54.074620 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_dnsmasq-dns-55478c4467-9frwn_bb018af8-7779-4386-8903-a1dfb982a26e/init/0.log" Nov 24 18:07:54 crc kubenswrapper[4760]: I1124 18:07:54.134651 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-nz2wr_8ed4f25b-14b5-4681-b0d2-36d4f01a0c1e/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:07:54 crc kubenswrapper[4760]: I1124 18:07:54.151116 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-55478c4467-9frwn_bb018af8-7779-4386-8903-a1dfb982a26e/dnsmasq-dns/0.log" Nov 24 18:07:54 crc kubenswrapper[4760]: I1124 18:07:54.289412 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_8531a189-02f3-4e03-8fca-ff113990ee3e/glance-httpd/0.log" Nov 24 18:07:54 crc kubenswrapper[4760]: I1124 18:07:54.302511 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_8531a189-02f3-4e03-8fca-ff113990ee3e/glance-log/0.log" Nov 24 18:07:54 crc kubenswrapper[4760]: I1124 18:07:54.460876 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_74e069c9-8459-4455-b520-fa8ba79bb677/glance-log/0.log" Nov 24 18:07:54 crc kubenswrapper[4760]: I1124 18:07:54.483531 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_74e069c9-8459-4455-b520-fa8ba79bb677/glance-httpd/0.log" Nov 24 18:07:54 crc kubenswrapper[4760]: I1124 18:07:54.626582 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-bc766455b-9dfnr_20fc1526-eb8d-424b-b03a-784154b5d7fa/horizon/0.log" Nov 24 18:07:54 crc kubenswrapper[4760]: I1124 18:07:54.797876 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-vnbqh_2522087b-33dd-418b-abb2-813ca0f5a051/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:07:54 crc kubenswrapper[4760]: I1124 18:07:54.983205 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-bc766455b-9dfnr_20fc1526-eb8d-424b-b03a-784154b5d7fa/horizon-log/0.log" Nov 24 18:07:55 crc kubenswrapper[4760]: I1124 18:07:55.007739 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-sxvjh_a58b5d43-9b4d-4061-96e1-e02c61a4630c/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:07:55 crc kubenswrapper[4760]: I1124 18:07:55.229089 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-5ccbbc7984-m6jkp_1fd753d3-759a-4734-96ac-9c7f1a9138fa/keystone-api/0.log" Nov 24 18:07:55 crc kubenswrapper[4760]: I1124 18:07:55.255338 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29400121-ttct7_984bae89-5644-4859-ab94-6f00104349eb/keystone-cron/0.log" Nov 24 18:07:55 crc kubenswrapper[4760]: I1124 18:07:55.397423 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_33972ca1-3846-487a-a8b0-fb67093b1a6d/kube-state-metrics/0.log" Nov 24 18:07:55 crc kubenswrapper[4760]: I1124 18:07:55.456609 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-w7c5l_d89f2f80-b7b0-49b2-beab-c4fd2d17352f/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:07:55 crc kubenswrapper[4760]: I1124 18:07:55.891085 4760 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_neutron-7b5b8bc889-kqfhp_f4823e15-ce2c-4a16-b80e-f676469b3624/neutron-httpd/0.log" Nov 24 18:07:55 crc kubenswrapper[4760]: I1124 18:07:55.923244 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7b5b8bc889-kqfhp_f4823e15-ce2c-4a16-b80e-f676469b3624/neutron-api/0.log" Nov 24 18:07:56 crc kubenswrapper[4760]: I1124 18:07:56.124673 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-4zsk7_e88df757-ac39-4a30-b0aa-eb820708e3b4/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:07:56 crc kubenswrapper[4760]: I1124 18:07:56.629551 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_6fcad265-b82c-400e-afce-ac2afac950d0/nova-api-log/0.log" Nov 24 18:07:56 crc kubenswrapper[4760]: I1124 18:07:56.682149 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_d5b9106d-4aee-4439-9c7d-41c1f015fd02/nova-cell0-conductor-conductor/0.log" Nov 24 18:07:56 crc kubenswrapper[4760]: I1124 18:07:56.928573 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_61870fa4-1b0e-450c-a2a8-06d3ba20cd3e/nova-cell1-conductor-conductor/0.log" Nov 24 18:07:57 crc kubenswrapper[4760]: I1124 18:07:57.011249 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_6fcad265-b82c-400e-afce-ac2afac950d0/nova-api-api/0.log" Nov 24 18:07:57 crc kubenswrapper[4760]: I1124 18:07:57.099733 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_d00b658b-a227-4c0c-9f91-d1c09d5f6173/nova-cell1-novncproxy-novncproxy/0.log" Nov 24 18:07:57 crc kubenswrapper[4760]: I1124 18:07:57.186433 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-r5fgt_cd29f6ba-13bc-4598-a031-18c0763458dc/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:07:57 crc kubenswrapper[4760]: I1124 18:07:57.349718 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_fa127b75-3942-4fba-815f-197979d77117/nova-metadata-log/0.log" Nov 24 18:07:57 crc kubenswrapper[4760]: I1124 18:07:57.729688 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_f54be0fa-3248-4732-b118-546367054335/nova-scheduler-scheduler/0.log" Nov 24 18:07:57 crc kubenswrapper[4760]: I1124 18:07:57.743443 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e072af6f-796e-4c4c-b7fa-a36ad7b972be/mysql-bootstrap/0.log" Nov 24 18:07:57 crc kubenswrapper[4760]: I1124 18:07:57.933631 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e072af6f-796e-4c4c-b7fa-a36ad7b972be/mysql-bootstrap/0.log" Nov 24 18:07:57 crc kubenswrapper[4760]: I1124 18:07:57.986389 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e072af6f-796e-4c4c-b7fa-a36ad7b972be/galera/0.log" Nov 24 18:07:58 crc kubenswrapper[4760]: I1124 18:07:58.139448 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ad1c45c2-91fc-4d03-9778-1f8ac8b891e5/mysql-bootstrap/0.log" Nov 24 18:07:58 crc kubenswrapper[4760]: I1124 18:07:58.341762 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ad1c45c2-91fc-4d03-9778-1f8ac8b891e5/galera/0.log" Nov 24 18:07:58 crc 
kubenswrapper[4760]: I1124 18:07:58.365951 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ad1c45c2-91fc-4d03-9778-1f8ac8b891e5/mysql-bootstrap/0.log" Nov 24 18:07:58 crc kubenswrapper[4760]: I1124 18:07:58.508759 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_dcc1106b-ca31-4432-948b-f01f5f47c370/openstackclient/0.log" Nov 24 18:07:58 crc kubenswrapper[4760]: I1124 18:07:58.564161 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-dl9cm_39e10c47-4e85-46de-a754-3ee0245718d7/ovn-controller/0.log" Nov 24 18:07:58 crc kubenswrapper[4760]: I1124 18:07:58.671357 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_fa127b75-3942-4fba-815f-197979d77117/nova-metadata-metadata/0.log" Nov 24 18:07:58 crc kubenswrapper[4760]: I1124 18:07:58.801179 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-sq2th_f1b927b9-dc29-4ac4-a3f1-c97233bf7b5a/openstack-network-exporter/0.log" Nov 24 18:07:58 crc kubenswrapper[4760]: I1124 18:07:58.953047 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bnfkl_f9bf53ae-4ba1-4619-b603-550b974e1970/ovsdb-server-init/0.log" Nov 24 18:07:59 crc kubenswrapper[4760]: I1124 18:07:59.079685 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bnfkl_f9bf53ae-4ba1-4619-b603-550b974e1970/ovsdb-server-init/0.log" Nov 24 18:07:59 crc kubenswrapper[4760]: I1124 18:07:59.168604 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bnfkl_f9bf53ae-4ba1-4619-b603-550b974e1970/ovsdb-server/0.log" Nov 24 18:07:59 crc kubenswrapper[4760]: I1124 18:07:59.168933 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bnfkl_f9bf53ae-4ba1-4619-b603-550b974e1970/ovs-vswitchd/0.log" Nov 24 18:07:59 crc kubenswrapper[4760]: I1124 18:07:59.323806 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-vb5mp_5e897692-730b-402f-a1a7-5f242a36fe2b/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:07:59 crc kubenswrapper[4760]: I1124 18:07:59.402734 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_53891de3-058b-46b8-b7f4-880ca70c1de3/ovn-northd/0.log" Nov 24 18:07:59 crc kubenswrapper[4760]: I1124 18:07:59.420292 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_53891de3-058b-46b8-b7f4-880ca70c1de3/openstack-network-exporter/0.log" Nov 24 18:07:59 crc kubenswrapper[4760]: I1124 18:07:59.580656 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_20f2f51e-4902-44f9-97d6-1ebf12c22ad6/openstack-network-exporter/0.log" Nov 24 18:07:59 crc kubenswrapper[4760]: I1124 18:07:59.631328 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_20f2f51e-4902-44f9-97d6-1ebf12c22ad6/ovsdbserver-nb/0.log" Nov 24 18:07:59 crc kubenswrapper[4760]: I1124 18:07:59.790776 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b/ovsdbserver-sb/0.log" Nov 24 18:07:59 crc kubenswrapper[4760]: I1124 18:07:59.830035 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b4f78e44-1a5c-45c4-a9f5-5ca852e0fc0b/openstack-network-exporter/0.log" Nov 24 18:07:59 
crc kubenswrapper[4760]: I1124 18:07:59.936664 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-8cf89787b-dxmqp_412c6295-ae70-4706-9c7b-88c4025c9579/placement-api/0.log" Nov 24 18:08:00 crc kubenswrapper[4760]: I1124 18:08:00.079132 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b8dd252d-07db-4037-b8c0-09ca191d9f56/setup-container/0.log" Nov 24 18:08:00 crc kubenswrapper[4760]: I1124 18:08:00.135374 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-8cf89787b-dxmqp_412c6295-ae70-4706-9c7b-88c4025c9579/placement-log/0.log" Nov 24 18:08:00 crc kubenswrapper[4760]: I1124 18:08:00.297331 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b8dd252d-07db-4037-b8c0-09ca191d9f56/setup-container/0.log" Nov 24 18:08:00 crc kubenswrapper[4760]: I1124 18:08:00.301966 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b8dd252d-07db-4037-b8c0-09ca191d9f56/rabbitmq/0.log" Nov 24 18:08:00 crc kubenswrapper[4760]: I1124 18:08:00.368288 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8e89585e-aad9-485c-88af-2380cefb8b18/setup-container/0.log" Nov 24 18:08:00 crc kubenswrapper[4760]: I1124 18:08:00.557848 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8e89585e-aad9-485c-88af-2380cefb8b18/setup-container/0.log" Nov 24 18:08:00 crc kubenswrapper[4760]: I1124 18:08:00.579075 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-lnst5_7010932f-cdb9-47d9-8674-07778eda876d/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:08:00 crc kubenswrapper[4760]: I1124 18:08:00.641836 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8e89585e-aad9-485c-88af-2380cefb8b18/rabbitmq/0.log" Nov 24 18:08:00 crc kubenswrapper[4760]: I1124 18:08:00.770705 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-7jrgz_bcfd976b-0081-44f8-b0f4-2ca0e2372299/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:08:00 crc kubenswrapper[4760]: I1124 18:08:00.867842 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-plbxh_bf199ff4-4624-4608-8b45-72a6f1437473/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:08:01 crc kubenswrapper[4760]: I1124 18:08:01.089718 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-2jq4r_0bad4d45-bcec-460e-b393-2c8841842af8/ssh-known-hosts-edpm-deployment/0.log" Nov 24 18:08:01 crc kubenswrapper[4760]: I1124 18:08:01.107903 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-7hvxr_7f9d57fb-8bae-4055-aa31-d14b9cd38b62/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:08:01 crc kubenswrapper[4760]: I1124 18:08:01.407822 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-69f4488969-xwpx8_37d3f873-9ed8-47d6-b62d-3b007dca3936/proxy-server/0.log" Nov 24 18:08:01 crc kubenswrapper[4760]: I1124 18:08:01.434408 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-69f4488969-xwpx8_37d3f873-9ed8-47d6-b62d-3b007dca3936/proxy-httpd/0.log" Nov 24 18:08:01 crc 
kubenswrapper[4760]: I1124 18:08:01.552684 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-d9mv7_d0bcc362-6648-4630-b41b-610209865eea/swift-ring-rebalance/0.log" Nov 24 18:08:01 crc kubenswrapper[4760]: I1124 18:08:01.646845 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/account-auditor/0.log" Nov 24 18:08:01 crc kubenswrapper[4760]: I1124 18:08:01.673458 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/account-reaper/0.log" Nov 24 18:08:01 crc kubenswrapper[4760]: I1124 18:08:01.859540 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/container-auditor/0.log" Nov 24 18:08:01 crc kubenswrapper[4760]: I1124 18:08:01.869811 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/account-server/0.log" Nov 24 18:08:01 crc kubenswrapper[4760]: I1124 18:08:01.915759 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/account-replicator/0.log" Nov 24 18:08:01 crc kubenswrapper[4760]: I1124 18:08:01.976600 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/container-replicator/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.067725 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/container-server/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.128673 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/container-updater/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.152406 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/object-auditor/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.267529 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/object-expirer/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.279893 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/object-replicator/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.377612 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/object-server/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.413492 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/object-updater/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.482929 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/rsync/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.493384 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_8b420e33-0bf9-4d88-b33e-b5ba674ea4d9/swift-recon-cron/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.667860 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-p8c9q_54204c3b-38f8-4e55-a645-b8c60b762c89/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.767210 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_eaccf616-1bfd-447a-b4d8-5cfb5b6c5b01/tempest-tests-tempest-tests-runner/0.log" Nov 24 18:08:02 crc kubenswrapper[4760]: I1124 18:08:02.845527 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_d89a1dea-6dac-4fd1-bb44-55076bd67fba/test-operator-logs-container/0.log" Nov 24 18:08:03 crc kubenswrapper[4760]: I1124 18:08:03.014107 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-fx279_a548ab89-b523-4f50-b490-7470e05662b6/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 18:08:05 crc kubenswrapper[4760]: I1124 18:08:05.473689 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:08:05 crc kubenswrapper[4760]: E1124 18:08:05.474273 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:08:12 crc kubenswrapper[4760]: I1124 18:08:12.160487 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_5645a4cb-e092-4b3f-a704-c3497f304e80/memcached/0.log" Nov 24 18:08:19 crc kubenswrapper[4760]: I1124 18:08:19.465950 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:08:19 crc kubenswrapper[4760]: E1124 18:08:19.466669 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:08:26 crc kubenswrapper[4760]: I1124 18:08:26.974846 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-f97qw_c07ab946-dbd4-4fbf-b17c-7bfa133e1c96/manager/0.log" Nov 24 18:08:26 crc kubenswrapper[4760]: I1124 18:08:26.980102 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-f97qw_c07ab946-dbd4-4fbf-b17c-7bfa133e1c96/kube-rbac-proxy/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.192606 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-9fscr_8a48d8a2-3c00-4a6e-b88f-dab093355874/kube-rbac-proxy/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.253385 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-9fscr_8a48d8a2-3c00-4a6e-b88f-dab093355874/manager/0.log" Nov 24 18:08:27 
crc kubenswrapper[4760]: I1124 18:08:27.319364 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-6tfrh_df86f3d1-75ea-4757-8115-1440d92160b6/kube-rbac-proxy/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.385291 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-6tfrh_df86f3d1-75ea-4757-8115-1440d92160b6/manager/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.456821 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/util/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.630428 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/pull/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.633951 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/pull/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.650810 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/util/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.798745 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/util/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.805260 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/pull/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.836066 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f6abe9d955c8b0019ff9d2fb85f18ddb453ac410a764cabb309dbef48ftcw7p_a984675f-9d67-4699-a5c4-819cda440d13/extract/0.log" Nov 24 18:08:27 crc kubenswrapper[4760]: I1124 18:08:27.984688 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-znhd6_e3c878c9-0549-4e8b-bb1a-2754b8a8d402/kube-rbac-proxy/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.015598 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-dm4k7_abd30b3d-1e1d-4a1d-b4b6-aaf500949015/kube-rbac-proxy/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.025173 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-znhd6_e3c878c9-0549-4e8b-bb1a-2754b8a8d402/manager/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.165133 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-dm4k7_abd30b3d-1e1d-4a1d-b4b6-aaf500949015/manager/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.249137 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-95w5b_981e3771-3dd1-4e3d-9601-7c16bbc22c8f/kube-rbac-proxy/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.273073 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-95w5b_981e3771-3dd1-4e3d-9601-7c16bbc22c8f/manager/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.426749 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-jxvzz_11f165ab-07bd-46ce-ad35-5b349c9b16be/kube-rbac-proxy/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.630310 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-zd54m_93232e72-070f-4a46-89da-983cd8abe0b5/kube-rbac-proxy/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.632150 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-jxvzz_11f165ab-07bd-46ce-ad35-5b349c9b16be/manager/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.663485 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-zd54m_93232e72-070f-4a46-89da-983cd8abe0b5/manager/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.894223 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-wgd79_c0da29f6-094e-499d-90ea-93ddfe52e165/manager/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.897278 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-wgd79_c0da29f6-094e-499d-90ea-93ddfe52e165/kube-rbac-proxy/0.log" Nov 24 18:08:28 crc kubenswrapper[4760]: I1124 18:08:28.899135 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-87pfs_d7eea786-ecee-41f0-9a52-7ac9bef2f874/kube-rbac-proxy/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.080200 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-87pfs_d7eea786-ecee-41f0-9a52-7ac9bef2f874/manager/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.097496 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-fl68l_a29a1da0-a007-4d2d-8ca2-0a3f78e4d995/kube-rbac-proxy/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.109112 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-fl68l_a29a1da0-a007-4d2d-8ca2-0a3f78e4d995/manager/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.327988 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-j6f4z_dc1dfda1-793b-4b06-a228-0e5472915f76/manager/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.333668 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-j6f4z_dc1dfda1-793b-4b06-a228-0e5472915f76/kube-rbac-proxy/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.537501 4760 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-r7dzv_51bd5ae4-002b-40c4-bd9e-b6d087bfdaba/kube-rbac-proxy/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.591674 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-r7dzv_51bd5ae4-002b-40c4-bd9e-b6d087bfdaba/manager/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.606400 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-hn7wz_f43007f0-7615-44a1-8594-dd0b0adbded6/kube-rbac-proxy/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.714349 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-hn7wz_f43007f0-7615-44a1-8594-dd0b0adbded6/manager/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.775327 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm_2ebc4c96-b0e9-4f9f-950b-5af42b867a8a/manager/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.806991 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-j6rkm_2ebc4c96-b0e9-4f9f-950b-5af42b867a8a/kube-rbac-proxy/0.log" Nov 24 18:08:29 crc kubenswrapper[4760]: I1124 18:08:29.931955 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-d5d9ddcff-zjhwp_ca3d8449-fd16-491b-bd2e-06dcd9103bdf/kube-rbac-proxy/0.log" Nov 24 18:08:30 crc kubenswrapper[4760]: I1124 18:08:30.169238 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-d5ff89cd9-mtcgb_20782ce3-a28a-4fa7-a4c1-ae186c4e9f44/kube-rbac-proxy/0.log" Nov 24 18:08:30 crc kubenswrapper[4760]: I1124 18:08:30.413192 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-d5ff89cd9-mtcgb_20782ce3-a28a-4fa7-a4c1-ae186c4e9f44/operator/0.log" Nov 24 18:08:30 crc kubenswrapper[4760]: I1124 18:08:30.415685 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-7jdkl_baed48fd-5a3a-482e-a24e-2aff550b63dc/registry-server/0.log" Nov 24 18:08:30 crc kubenswrapper[4760]: I1124 18:08:30.656562 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-tjtzh_3ead61e1-d87a-44bb-8144-3198f06976c4/kube-rbac-proxy/0.log" Nov 24 18:08:30 crc kubenswrapper[4760]: I1124 18:08:30.739333 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-tjtzh_3ead61e1-d87a-44bb-8144-3198f06976c4/manager/0.log" Nov 24 18:08:30 crc kubenswrapper[4760]: I1124 18:08:30.915947 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-9cxcs_b075e65d-1bff-4853-9f78-339a20dde0d8/kube-rbac-proxy/0.log" Nov 24 18:08:30 crc kubenswrapper[4760]: I1124 18:08:30.921726 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-9cxcs_b075e65d-1bff-4853-9f78-339a20dde0d8/manager/0.log" Nov 24 18:08:31 crc kubenswrapper[4760]: I1124 
18:08:31.004350 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-d5d9ddcff-zjhwp_ca3d8449-fd16-491b-bd2e-06dcd9103bdf/manager/0.log" Nov 24 18:08:31 crc kubenswrapper[4760]: I1124 18:08:31.062351 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-b26r2_df2ff3b3-46c0-4a51-bac9-e19df21c24fa/operator/0.log" Nov 24 18:08:31 crc kubenswrapper[4760]: I1124 18:08:31.133966 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-mnszq_43e5759b-21f0-45be-a96b-c0c86229273f/kube-rbac-proxy/0.log" Nov 24 18:08:31 crc kubenswrapper[4760]: I1124 18:08:31.163470 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-mnszq_43e5759b-21f0-45be-a96b-c0c86229273f/manager/0.log" Nov 24 18:08:31 crc kubenswrapper[4760]: I1124 18:08:31.237731 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54d7678447-gcrcj_12583812-acca-4939-9358-17b4bb668450/kube-rbac-proxy/0.log" Nov 24 18:08:31 crc kubenswrapper[4760]: I1124 18:08:31.332239 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54d7678447-gcrcj_12583812-acca-4939-9358-17b4bb668450/manager/0.log" Nov 24 18:08:31 crc kubenswrapper[4760]: I1124 18:08:31.497557 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-qn926_5b5f6f3c-636d-4507-8c3d-51c1ac4693d6/kube-rbac-proxy/0.log" Nov 24 18:08:31 crc kubenswrapper[4760]: I1124 18:08:31.567671 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-qn926_5b5f6f3c-636d-4507-8c3d-51c1ac4693d6/manager/0.log" Nov 24 18:08:31 crc kubenswrapper[4760]: I1124 18:08:31.627851 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-8c6448b9f-8jmvq_6a3853ba-f14b-4d13-96c5-7b7a590086ca/manager/0.log" Nov 24 18:08:31 crc kubenswrapper[4760]: I1124 18:08:31.653950 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-8c6448b9f-8jmvq_6a3853ba-f14b-4d13-96c5-7b7a590086ca/kube-rbac-proxy/0.log" Nov 24 18:08:34 crc kubenswrapper[4760]: I1124 18:08:34.467307 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:08:34 crc kubenswrapper[4760]: E1124 18:08:34.468294 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:08:46 crc kubenswrapper[4760]: I1124 18:08:46.845937 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-sldtw_a67e9663-0794-412b-b976-c0c50f39184e/control-plane-machine-set-operator/0.log" Nov 24 18:08:47 crc kubenswrapper[4760]: I1124 18:08:47.012722 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rt9dn_843e455c-4df4-4e25-91f1-456b61889db5/machine-api-operator/0.log" Nov 24 18:08:47 crc kubenswrapper[4760]: I1124 18:08:47.064694 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rt9dn_843e455c-4df4-4e25-91f1-456b61889db5/kube-rbac-proxy/0.log" Nov 24 18:08:49 crc kubenswrapper[4760]: I1124 18:08:49.466935 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:08:49 crc kubenswrapper[4760]: E1124 18:08:49.467723 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:08:58 crc kubenswrapper[4760]: I1124 18:08:58.903515 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-xtps2_bc25f619-5720-41cd-9fe6-beb030debe00/cert-manager-controller/0.log" Nov 24 18:08:59 crc kubenswrapper[4760]: I1124 18:08:59.031982 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-qn2xj_e64b9328-142b-47be-a2f9-9c2339244683/cert-manager-cainjector/0.log" Nov 24 18:08:59 crc kubenswrapper[4760]: I1124 18:08:59.124877 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-dcdmq_32a75cd6-7dcc-409b-9208-86578c121ec7/cert-manager-webhook/0.log" Nov 24 18:09:01 crc kubenswrapper[4760]: I1124 18:09:01.466393 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:09:01 crc kubenswrapper[4760]: E1124 18:09:01.467131 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:09:10 crc kubenswrapper[4760]: I1124 18:09:10.943426 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-w6tj7_5228db69-23c1-48fa-a89f-a4e0459bcdec/nmstate-console-plugin/0.log" Nov 24 18:09:11 crc kubenswrapper[4760]: I1124 18:09:11.044407 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-nl86x_fe888a38-6db0-4cc7-b0e6-7eeb5ecbd7a3/nmstate-handler/0.log" Nov 24 18:09:11 crc kubenswrapper[4760]: I1124 18:09:11.135575 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-tgfj2_c02c0ef6-ec2f-4554-89d5-95ccd5a9af05/nmstate-metrics/0.log" Nov 24 18:09:11 crc kubenswrapper[4760]: I1124 18:09:11.151877 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-tgfj2_c02c0ef6-ec2f-4554-89d5-95ccd5a9af05/kube-rbac-proxy/0.log" Nov 24 18:09:11 crc kubenswrapper[4760]: I1124 18:09:11.328310 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-zn9lc_60a6fd21-1e4a-4eab-940a-157de6e7236e/nmstate-operator/0.log" Nov 24 18:09:11 crc kubenswrapper[4760]: I1124 18:09:11.342580 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-dgm2z_2a8eb5c7-a2fa-4029-9d10-9ef82f358506/nmstate-webhook/0.log" Nov 24 18:09:14 crc kubenswrapper[4760]: I1124 18:09:14.467258 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:09:14 crc kubenswrapper[4760]: E1124 18:09:14.468237 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.385730 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-r22lk_b2d37ad6-a6ac-4c40-82e2-4eb9319e9244/kube-rbac-proxy/0.log" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.463157 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-r22lk_b2d37ad6-a6ac-4c40-82e2-4eb9319e9244/controller/0.log" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.587950 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-frr-files/0.log" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.731222 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-frr-files/0.log" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.738364 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-reloader/0.log" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.767253 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-metrics/0.log" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.788415 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-reloader/0.log" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.929647 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-frr-files/0.log" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.974105 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-metrics/0.log" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.974877 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-reloader/0.log" Nov 24 18:09:24 crc kubenswrapper[4760]: I1124 18:09:24.998704 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-metrics/0.log" Nov 24 18:09:25 crc kubenswrapper[4760]: I1124 18:09:25.152128 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-frr-files/0.log" Nov 24 18:09:25 crc kubenswrapper[4760]: I1124 18:09:25.167450 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-reloader/0.log" Nov 24 18:09:25 crc kubenswrapper[4760]: I1124 18:09:25.168563 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/cp-metrics/0.log" Nov 24 18:09:25 crc kubenswrapper[4760]: I1124 18:09:25.207898 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/controller/0.log" Nov 24 18:09:25 crc kubenswrapper[4760]: I1124 18:09:25.332181 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/kube-rbac-proxy/0.log" Nov 24 18:09:25 crc kubenswrapper[4760]: I1124 18:09:25.377390 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/frr-metrics/0.log" Nov 24 18:09:25 crc kubenswrapper[4760]: I1124 18:09:25.405661 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/kube-rbac-proxy-frr/0.log" Nov 24 18:09:25 crc kubenswrapper[4760]: I1124 18:09:25.869397 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-qk694_858bafc0-44a3-4e65-9a8f-0da3e8d6f624/frr-k8s-webhook-server/0.log" Nov 24 18:09:25 crc kubenswrapper[4760]: I1124 18:09:25.892014 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/reloader/0.log" Nov 24 18:09:26 crc kubenswrapper[4760]: I1124 18:09:26.176549 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6cbb78f8d9-xsz89_9608978e-3402-4aa6-97aa-c15d47a81890/manager/0.log" Nov 24 18:09:26 crc kubenswrapper[4760]: I1124 18:09:26.313875 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-558dddbf45-rtbw7_9c952901-3384-4ff2-a54a-28b709c934a7/webhook-server/0.log" Nov 24 18:09:26 crc kubenswrapper[4760]: I1124 18:09:26.411733 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-z7h4x_576e8cbe-2d96-43c5-a62c-d4f22abdc21a/kube-rbac-proxy/0.log" Nov 24 18:09:26 crc kubenswrapper[4760]: I1124 18:09:26.466792 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:09:26 crc kubenswrapper[4760]: E1124 18:09:26.467086 4760 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vgbxz_openshift-machine-config-operator(f71fb2ac-0373-4606-a20a-0b60ca26fbc3)\"" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" Nov 24 18:09:26 crc kubenswrapper[4760]: I1124 18:09:26.485735 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-j4nl8_9debe41b-d028-4243-be0e-8d191f93d290/frr/0.log" Nov 24 18:09:26 crc kubenswrapper[4760]: I1124 18:09:26.779059 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_speaker-z7h4x_576e8cbe-2d96-43c5-a62c-d4f22abdc21a/speaker/0.log" Nov 24 18:09:39 crc kubenswrapper[4760]: I1124 18:09:39.466810 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:09:39 crc kubenswrapper[4760]: I1124 18:09:39.470573 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/util/0.log" Nov 24 18:09:39 crc kubenswrapper[4760]: I1124 18:09:39.694275 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/util/0.log" Nov 24 18:09:39 crc kubenswrapper[4760]: I1124 18:09:39.714330 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/pull/0.log" Nov 24 18:09:39 crc kubenswrapper[4760]: I1124 18:09:39.734808 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/pull/0.log" Nov 24 18:09:39 crc kubenswrapper[4760]: I1124 18:09:39.893745 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/extract/0.log" Nov 24 18:09:39 crc kubenswrapper[4760]: I1124 18:09:39.895649 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/util/0.log" Nov 24 18:09:39 crc kubenswrapper[4760]: I1124 18:09:39.899088 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eh6vrn_0f244ccb-ab02-43bc-8cd5-645c33d953b9/pull/0.log" Nov 24 18:09:40 crc kubenswrapper[4760]: I1124 18:09:40.078848 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-utilities/0.log" Nov 24 18:09:40 crc kubenswrapper[4760]: I1124 18:09:40.095224 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"09f88cffd510c8136a8ee378ab670190ef3e9a77919ac0344962af452d0ffcda"} Nov 24 18:09:40 crc kubenswrapper[4760]: I1124 18:09:40.281023 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-content/0.log" Nov 24 18:09:40 crc kubenswrapper[4760]: I1124 18:09:40.307312 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-content/0.log" Nov 24 18:09:40 crc kubenswrapper[4760]: I1124 18:09:40.315654 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-utilities/0.log" Nov 24 18:09:40 crc kubenswrapper[4760]: I1124 18:09:40.493799 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-content/0.log" Nov 24 18:09:40 crc kubenswrapper[4760]: I1124 18:09:40.497875 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/extract-utilities/0.log" Nov 24 18:09:40 crc kubenswrapper[4760]: I1124 18:09:40.744253 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-utilities/0.log" Nov 24 18:09:40 crc kubenswrapper[4760]: I1124 18:09:40.971488 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-content/0.log" Nov 24 18:09:40 crc kubenswrapper[4760]: I1124 18:09:40.985712 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-utilities/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.033806 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-content/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.036716 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2kfvr_3e811c73-6a8f-42b7-9a9c-4a062f6313cb/registry-server/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.187828 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-utilities/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.197919 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/extract-content/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.375290 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/util/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.590284 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/pull/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.658898 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/pull/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.666265 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/util/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.780501 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r2bbn_b10118dc-03fc-435c-8510-00a210c546a4/registry-server/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.821648 4760 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/util/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.862632 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/pull/0.log" Nov 24 18:09:41 crc kubenswrapper[4760]: I1124 18:09:41.910035 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mpp79_ea4e0bc2-6410-4a4a-9fca-104f010a54e7/extract/0.log" Nov 24 18:09:42 crc kubenswrapper[4760]: I1124 18:09:42.014258 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-jfn9q_3947261e-1d34-46c9-a769-f71d6e03f7d1/marketplace-operator/0.log" Nov 24 18:09:42 crc kubenswrapper[4760]: I1124 18:09:42.318879 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-utilities/0.log" Nov 24 18:09:42 crc kubenswrapper[4760]: I1124 18:09:42.464582 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-utilities/0.log" Nov 24 18:09:42 crc kubenswrapper[4760]: I1124 18:09:42.470082 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-content/0.log" Nov 24 18:09:42 crc kubenswrapper[4760]: I1124 18:09:42.481528 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-content/0.log" Nov 24 18:09:42 crc kubenswrapper[4760]: I1124 18:09:42.628780 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-utilities/0.log" Nov 24 18:09:42 crc kubenswrapper[4760]: I1124 18:09:42.691634 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/extract-content/0.log" Nov 24 18:09:42 crc kubenswrapper[4760]: I1124 18:09:42.838603 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-p2cws_56c164f7-0218-4d51-af82-508b2f979a6f/registry-server/0.log" Nov 24 18:09:42 crc kubenswrapper[4760]: I1124 18:09:42.842380 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-utilities/0.log" Nov 24 18:09:43 crc kubenswrapper[4760]: I1124 18:09:43.083056 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-utilities/0.log" Nov 24 18:09:43 crc kubenswrapper[4760]: I1124 18:09:43.092896 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-content/0.log" Nov 24 18:09:43 crc kubenswrapper[4760]: I1124 18:09:43.094523 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-content/0.log" Nov 24 18:09:43 crc kubenswrapper[4760]: 
I1124 18:09:43.221242 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-utilities/0.log" Nov 24 18:09:43 crc kubenswrapper[4760]: I1124 18:09:43.262535 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/extract-content/0.log" Nov 24 18:09:43 crc kubenswrapper[4760]: I1124 18:09:43.719776 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4nwzh_eb9f8a49-7730-403e-bf3a-7aefc7e44b93/registry-server/0.log" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.332487 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hg42n"] Nov 24 18:11:20 crc kubenswrapper[4760]: E1124 18:11:20.333746 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3dca026-5c42-4393-b0da-63d2fb7d8e5e" containerName="container-00" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.333767 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3dca026-5c42-4393-b0da-63d2fb7d8e5e" containerName="container-00" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.334236 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3dca026-5c42-4393-b0da-63d2fb7d8e5e" containerName="container-00" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.336728 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.357269 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hg42n"] Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.418940 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-utilities\") pod \"community-operators-hg42n\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.419112 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cxt6\" (UniqueName: \"kubernetes.io/projected/a21ec251-5581-4d83-8df4-f7134c1b695e-kube-api-access-6cxt6\") pod \"community-operators-hg42n\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.419405 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-catalog-content\") pod \"community-operators-hg42n\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.521498 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-utilities\") pod \"community-operators-hg42n\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.521548 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-6cxt6\" (UniqueName: \"kubernetes.io/projected/a21ec251-5581-4d83-8df4-f7134c1b695e-kube-api-access-6cxt6\") pod \"community-operators-hg42n\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.521667 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-catalog-content\") pod \"community-operators-hg42n\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.523728 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-utilities\") pod \"community-operators-hg42n\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.524271 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-catalog-content\") pod \"community-operators-hg42n\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.544618 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cxt6\" (UniqueName: \"kubernetes.io/projected/a21ec251-5581-4d83-8df4-f7134c1b695e-kube-api-access-6cxt6\") pod \"community-operators-hg42n\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:20 crc kubenswrapper[4760]: I1124 18:11:20.670268 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:21 crc kubenswrapper[4760]: I1124 18:11:21.245735 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hg42n"] Nov 24 18:11:22 crc kubenswrapper[4760]: I1124 18:11:22.166380 4760 generic.go:334] "Generic (PLEG): container finished" podID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerID="9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5" exitCode=0 Nov 24 18:11:22 crc kubenswrapper[4760]: I1124 18:11:22.166499 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hg42n" event={"ID":"a21ec251-5581-4d83-8df4-f7134c1b695e","Type":"ContainerDied","Data":"9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5"} Nov 24 18:11:22 crc kubenswrapper[4760]: I1124 18:11:22.166750 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hg42n" event={"ID":"a21ec251-5581-4d83-8df4-f7134c1b695e","Type":"ContainerStarted","Data":"d1904c40fc2998380573be29159b69146f05475522179f4ca2a83356cae9cb03"} Nov 24 18:11:22 crc kubenswrapper[4760]: I1124 18:11:22.168146 4760 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 18:11:23 crc kubenswrapper[4760]: I1124 18:11:23.176472 4760 generic.go:334] "Generic (PLEG): container finished" podID="a6ec6e50-7fdc-459f-96b3-ed244e7da412" containerID="b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207" exitCode=0 Nov 24 18:11:23 crc kubenswrapper[4760]: I1124 18:11:23.176561 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-9p2gn/must-gather-5t4fq" event={"ID":"a6ec6e50-7fdc-459f-96b3-ed244e7da412","Type":"ContainerDied","Data":"b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207"} Nov 24 18:11:23 crc kubenswrapper[4760]: I1124 18:11:23.177559 4760 scope.go:117] "RemoveContainer" containerID="b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207" Nov 24 18:11:23 crc kubenswrapper[4760]: I1124 18:11:23.179188 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hg42n" event={"ID":"a21ec251-5581-4d83-8df4-f7134c1b695e","Type":"ContainerStarted","Data":"c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708"} Nov 24 18:11:23 crc kubenswrapper[4760]: I1124 18:11:23.634246 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-9p2gn_must-gather-5t4fq_a6ec6e50-7fdc-459f-96b3-ed244e7da412/gather/0.log" Nov 24 18:11:24 crc kubenswrapper[4760]: I1124 18:11:24.189291 4760 generic.go:334] "Generic (PLEG): container finished" podID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerID="c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708" exitCode=0 Nov 24 18:11:24 crc kubenswrapper[4760]: I1124 18:11:24.189334 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hg42n" event={"ID":"a21ec251-5581-4d83-8df4-f7134c1b695e","Type":"ContainerDied","Data":"c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708"} Nov 24 18:11:25 crc kubenswrapper[4760]: I1124 18:11:25.199831 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hg42n" event={"ID":"a21ec251-5581-4d83-8df4-f7134c1b695e","Type":"ContainerStarted","Data":"b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8"} Nov 24 18:11:25 crc 
kubenswrapper[4760]: I1124 18:11:25.263252 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hg42n" podStartSLOduration=2.783322321 podStartE2EDuration="5.263231854s" podCreationTimestamp="2025-11-24 18:11:20 +0000 UTC" firstStartedPulling="2025-11-24 18:11:22.167843673 +0000 UTC m=+4077.490725223" lastFinishedPulling="2025-11-24 18:11:24.647753196 +0000 UTC m=+4079.970634756" observedRunningTime="2025-11-24 18:11:25.219314675 +0000 UTC m=+4080.542196225" watchObservedRunningTime="2025-11-24 18:11:25.263231854 +0000 UTC m=+4080.586113474" Nov 24 18:11:30 crc kubenswrapper[4760]: I1124 18:11:30.671060 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:30 crc kubenswrapper[4760]: I1124 18:11:30.671698 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:30 crc kubenswrapper[4760]: I1124 18:11:30.737196 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:31 crc kubenswrapper[4760]: I1124 18:11:31.332809 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:31 crc kubenswrapper[4760]: I1124 18:11:31.378291 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hg42n"] Nov 24 18:11:33 crc kubenswrapper[4760]: I1124 18:11:33.279564 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hg42n" podUID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerName="registry-server" containerID="cri-o://b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8" gracePeriod=2 Nov 24 18:11:33 crc kubenswrapper[4760]: I1124 18:11:33.302416 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-9p2gn/must-gather-5t4fq"] Nov 24 18:11:33 crc kubenswrapper[4760]: I1124 18:11:33.302785 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-9p2gn/must-gather-5t4fq" podUID="a6ec6e50-7fdc-459f-96b3-ed244e7da412" containerName="copy" containerID="cri-o://d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7" gracePeriod=2 Nov 24 18:11:33 crc kubenswrapper[4760]: I1124 18:11:33.321130 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-9p2gn/must-gather-5t4fq"] Nov 24 18:11:33 crc kubenswrapper[4760]: I1124 18:11:33.950617 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:33 crc kubenswrapper[4760]: I1124 18:11:33.955864 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-9p2gn_must-gather-5t4fq_a6ec6e50-7fdc-459f-96b3-ed244e7da412/copy/0.log" Nov 24 18:11:33 crc kubenswrapper[4760]: I1124 18:11:33.956237 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-9p2gn/must-gather-5t4fq" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.032127 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a6ec6e50-7fdc-459f-96b3-ed244e7da412-must-gather-output\") pod \"a6ec6e50-7fdc-459f-96b3-ed244e7da412\" (UID: \"a6ec6e50-7fdc-459f-96b3-ed244e7da412\") " Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.032202 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9f2vg\" (UniqueName: \"kubernetes.io/projected/a6ec6e50-7fdc-459f-96b3-ed244e7da412-kube-api-access-9f2vg\") pod \"a6ec6e50-7fdc-459f-96b3-ed244e7da412\" (UID: \"a6ec6e50-7fdc-459f-96b3-ed244e7da412\") " Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.032220 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cxt6\" (UniqueName: \"kubernetes.io/projected/a21ec251-5581-4d83-8df4-f7134c1b695e-kube-api-access-6cxt6\") pod \"a21ec251-5581-4d83-8df4-f7134c1b695e\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.032247 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-utilities\") pod \"a21ec251-5581-4d83-8df4-f7134c1b695e\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.032310 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-catalog-content\") pod \"a21ec251-5581-4d83-8df4-f7134c1b695e\" (UID: \"a21ec251-5581-4d83-8df4-f7134c1b695e\") " Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.033422 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-utilities" (OuterVolumeSpecName: "utilities") pod "a21ec251-5581-4d83-8df4-f7134c1b695e" (UID: "a21ec251-5581-4d83-8df4-f7134c1b695e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.038939 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a21ec251-5581-4d83-8df4-f7134c1b695e-kube-api-access-6cxt6" (OuterVolumeSpecName: "kube-api-access-6cxt6") pod "a21ec251-5581-4d83-8df4-f7134c1b695e" (UID: "a21ec251-5581-4d83-8df4-f7134c1b695e"). InnerVolumeSpecName "kube-api-access-6cxt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.043855 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6ec6e50-7fdc-459f-96b3-ed244e7da412-kube-api-access-9f2vg" (OuterVolumeSpecName: "kube-api-access-9f2vg") pod "a6ec6e50-7fdc-459f-96b3-ed244e7da412" (UID: "a6ec6e50-7fdc-459f-96b3-ed244e7da412"). InnerVolumeSpecName "kube-api-access-9f2vg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.134358 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9f2vg\" (UniqueName: \"kubernetes.io/projected/a6ec6e50-7fdc-459f-96b3-ed244e7da412-kube-api-access-9f2vg\") on node \"crc\" DevicePath \"\"" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.134398 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cxt6\" (UniqueName: \"kubernetes.io/projected/a21ec251-5581-4d83-8df4-f7134c1b695e-kube-api-access-6cxt6\") on node \"crc\" DevicePath \"\"" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.134411 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.206638 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6ec6e50-7fdc-459f-96b3-ed244e7da412-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "a6ec6e50-7fdc-459f-96b3-ed244e7da412" (UID: "a6ec6e50-7fdc-459f-96b3-ed244e7da412"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.236528 4760 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a6ec6e50-7fdc-459f-96b3-ed244e7da412-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.288776 4760 generic.go:334] "Generic (PLEG): container finished" podID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerID="b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8" exitCode=0 Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.288839 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hg42n" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.288884 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hg42n" event={"ID":"a21ec251-5581-4d83-8df4-f7134c1b695e","Type":"ContainerDied","Data":"b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8"} Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.288937 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hg42n" event={"ID":"a21ec251-5581-4d83-8df4-f7134c1b695e","Type":"ContainerDied","Data":"d1904c40fc2998380573be29159b69146f05475522179f4ca2a83356cae9cb03"} Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.288976 4760 scope.go:117] "RemoveContainer" containerID="b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.290840 4760 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-9p2gn_must-gather-5t4fq_a6ec6e50-7fdc-459f-96b3-ed244e7da412/copy/0.log" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.291327 4760 generic.go:334] "Generic (PLEG): container finished" podID="a6ec6e50-7fdc-459f-96b3-ed244e7da412" containerID="d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7" exitCode=143 Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.291372 4760 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-9p2gn/must-gather-5t4fq" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.308850 4760 scope.go:117] "RemoveContainer" containerID="c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.348458 4760 scope.go:117] "RemoveContainer" containerID="9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.366696 4760 scope.go:117] "RemoveContainer" containerID="b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8" Nov 24 18:11:34 crc kubenswrapper[4760]: E1124 18:11:34.367281 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8\": container with ID starting with b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8 not found: ID does not exist" containerID="b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.367366 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8"} err="failed to get container status \"b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8\": rpc error: code = NotFound desc = could not find container \"b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8\": container with ID starting with b9f4171270293ff64ad4a3e21806d61cd421411f6ab1e2668d233c3f14ec40e8 not found: ID does not exist" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.367398 4760 scope.go:117] "RemoveContainer" containerID="c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708" Nov 24 18:11:34 crc kubenswrapper[4760]: E1124 18:11:34.367829 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708\": container with ID starting with c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708 not found: ID does not exist" containerID="c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.367868 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708"} err="failed to get container status \"c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708\": rpc error: code = NotFound desc = could not find container \"c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708\": container with ID starting with c3cd669a3387a2531e570d5d7b7c86f00e9403c2a1d49a1787e11903abd98708 not found: ID does not exist" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.367895 4760 scope.go:117] "RemoveContainer" containerID="9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5" Nov 24 18:11:34 crc kubenswrapper[4760]: E1124 18:11:34.368741 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5\": container with ID starting with 9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5 not found: ID does not exist" containerID="9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5" 
Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.368769 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5"} err="failed to get container status \"9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5\": rpc error: code = NotFound desc = could not find container \"9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5\": container with ID starting with 9ad3cb18c9ea13dec06c51128facfa5febf6351d009db670742a2381cfc51ba5 not found: ID does not exist" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.368784 4760 scope.go:117] "RemoveContainer" containerID="d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.387841 4760 scope.go:117] "RemoveContainer" containerID="b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.496839 4760 scope.go:117] "RemoveContainer" containerID="d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7" Nov 24 18:11:34 crc kubenswrapper[4760]: E1124 18:11:34.497472 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7\": container with ID starting with d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7 not found: ID does not exist" containerID="d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.497525 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7"} err="failed to get container status \"d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7\": rpc error: code = NotFound desc = could not find container \"d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7\": container with ID starting with d0a0691af5f09f563175c91ca9b87ae73288c48f27db7283212dce29298fdbc7 not found: ID does not exist" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.497559 4760 scope.go:117] "RemoveContainer" containerID="b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207" Nov 24 18:11:34 crc kubenswrapper[4760]: E1124 18:11:34.497826 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207\": container with ID starting with b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207 not found: ID does not exist" containerID="b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.497851 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207"} err="failed to get container status \"b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207\": rpc error: code = NotFound desc = could not find container \"b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207\": container with ID starting with b6c270384faae40262db46db4b2e589e1ee7fdf9eae62c79f5407b819d965207 not found: ID does not exist" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.681722 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a21ec251-5581-4d83-8df4-f7134c1b695e" (UID: "a21ec251-5581-4d83-8df4-f7134c1b695e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.745715 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a21ec251-5581-4d83-8df4-f7134c1b695e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.919798 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hg42n"] Nov 24 18:11:34 crc kubenswrapper[4760]: I1124 18:11:34.928460 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hg42n"] Nov 24 18:11:35 crc kubenswrapper[4760]: I1124 18:11:35.481141 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a21ec251-5581-4d83-8df4-f7134c1b695e" path="/var/lib/kubelet/pods/a21ec251-5581-4d83-8df4-f7134c1b695e/volumes" Nov 24 18:11:35 crc kubenswrapper[4760]: I1124 18:11:35.482996 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6ec6e50-7fdc-459f-96b3-ed244e7da412" path="/var/lib/kubelet/pods/a6ec6e50-7fdc-459f-96b3-ed244e7da412/volumes" Nov 24 18:12:05 crc kubenswrapper[4760]: I1124 18:12:05.642913 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:12:05 crc kubenswrapper[4760]: I1124 18:12:05.644094 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:12:35 crc kubenswrapper[4760]: I1124 18:12:35.642483 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:12:35 crc kubenswrapper[4760]: I1124 18:12:35.643062 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:13:05 crc kubenswrapper[4760]: I1124 18:13:05.642674 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:13:05 crc kubenswrapper[4760]: I1124 18:13:05.643356 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:13:05 crc kubenswrapper[4760]: I1124 18:13:05.643427 4760 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" Nov 24 18:13:05 crc kubenswrapper[4760]: I1124 18:13:05.644573 4760 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"09f88cffd510c8136a8ee378ab670190ef3e9a77919ac0344962af452d0ffcda"} pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 18:13:05 crc kubenswrapper[4760]: I1124 18:13:05.644726 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" containerID="cri-o://09f88cffd510c8136a8ee378ab670190ef3e9a77919ac0344962af452d0ffcda" gracePeriod=600 Nov 24 18:13:06 crc kubenswrapper[4760]: I1124 18:13:06.225399 4760 generic.go:334] "Generic (PLEG): container finished" podID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerID="09f88cffd510c8136a8ee378ab670190ef3e9a77919ac0344962af452d0ffcda" exitCode=0 Nov 24 18:13:06 crc kubenswrapper[4760]: I1124 18:13:06.225488 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerDied","Data":"09f88cffd510c8136a8ee378ab670190ef3e9a77919ac0344962af452d0ffcda"} Nov 24 18:13:06 crc kubenswrapper[4760]: I1124 18:13:06.225902 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" event={"ID":"f71fb2ac-0373-4606-a20a-0b60ca26fbc3","Type":"ContainerStarted","Data":"d8ad96f3e27f2930e5f3c3f349057d1994af9525c97be34dc3403ad35af47afe"} Nov 24 18:13:06 crc kubenswrapper[4760]: I1124 18:13:06.225925 4760 scope.go:117] "RemoveContainer" containerID="2f56acd01516dbbb59e34a2f9808df18097584f3dfc4bacc8c3a47d3bcb2a13b" Nov 24 18:13:17 crc kubenswrapper[4760]: I1124 18:13:17.303509 4760 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-69f4488969-xwpx8" podUID="37d3f873-9ed8-47d6-b62d-3b007dca3936" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.226047 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-24dkf"] Nov 24 18:13:28 crc kubenswrapper[4760]: E1124 18:13:28.227188 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6ec6e50-7fdc-459f-96b3-ed244e7da412" containerName="copy" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.227203 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6ec6e50-7fdc-459f-96b3-ed244e7da412" containerName="copy" Nov 24 18:13:28 crc kubenswrapper[4760]: E1124 18:13:28.227216 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerName="registry-server" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.227223 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerName="registry-server" Nov 24 18:13:28 crc 
kubenswrapper[4760]: E1124 18:13:28.227230 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerName="extract-utilities" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.227238 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerName="extract-utilities" Nov 24 18:13:28 crc kubenswrapper[4760]: E1124 18:13:28.227263 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6ec6e50-7fdc-459f-96b3-ed244e7da412" containerName="gather" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.227269 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6ec6e50-7fdc-459f-96b3-ed244e7da412" containerName="gather" Nov 24 18:13:28 crc kubenswrapper[4760]: E1124 18:13:28.227280 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerName="extract-content" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.227286 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerName="extract-content" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.227449 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6ec6e50-7fdc-459f-96b3-ed244e7da412" containerName="gather" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.227458 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a21ec251-5581-4d83-8df4-f7134c1b695e" containerName="registry-server" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.227479 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6ec6e50-7fdc-459f-96b3-ed244e7da412" containerName="copy" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.228772 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.255617 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-24dkf"] Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.386774 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-catalog-content\") pod \"certified-operators-24dkf\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.386972 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78z4k\" (UniqueName: \"kubernetes.io/projected/66edd3ad-2901-44a6-9a31-69b2da7f41dc-kube-api-access-78z4k\") pod \"certified-operators-24dkf\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.387077 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-utilities\") pod \"certified-operators-24dkf\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.488431 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78z4k\" (UniqueName: \"kubernetes.io/projected/66edd3ad-2901-44a6-9a31-69b2da7f41dc-kube-api-access-78z4k\") pod \"certified-operators-24dkf\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.488535 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-utilities\") pod \"certified-operators-24dkf\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.488588 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-catalog-content\") pod \"certified-operators-24dkf\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.489197 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-utilities\") pod \"certified-operators-24dkf\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.489240 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-catalog-content\") pod \"certified-operators-24dkf\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.507203 4760 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-78z4k\" (UniqueName: \"kubernetes.io/projected/66edd3ad-2901-44a6-9a31-69b2da7f41dc-kube-api-access-78z4k\") pod \"certified-operators-24dkf\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:28 crc kubenswrapper[4760]: I1124 18:13:28.580683 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:29 crc kubenswrapper[4760]: I1124 18:13:29.086156 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-24dkf"] Nov 24 18:13:29 crc kubenswrapper[4760]: I1124 18:13:29.510573 4760 generic.go:334] "Generic (PLEG): container finished" podID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerID="aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674" exitCode=0 Nov 24 18:13:29 crc kubenswrapper[4760]: I1124 18:13:29.510656 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24dkf" event={"ID":"66edd3ad-2901-44a6-9a31-69b2da7f41dc","Type":"ContainerDied","Data":"aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674"} Nov 24 18:13:29 crc kubenswrapper[4760]: I1124 18:13:29.510707 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24dkf" event={"ID":"66edd3ad-2901-44a6-9a31-69b2da7f41dc","Type":"ContainerStarted","Data":"2bb4f937147ce4ead6d141b70c07d41449e78a06f54ad95e3698fbc9d31dbc75"} Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.624407 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tf7vk"] Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.626871 4760 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tf7vk" Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.627978 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-utilities\") pod \"redhat-marketplace-tf7vk\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") " pod="openshift-marketplace/redhat-marketplace-tf7vk" Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.628225 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-catalog-content\") pod \"redhat-marketplace-tf7vk\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") " pod="openshift-marketplace/redhat-marketplace-tf7vk" Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.628350 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmx8l\" (UniqueName: \"kubernetes.io/projected/83137fa1-0d2e-4f9c-8831-c546f37947aa-kube-api-access-kmx8l\") pod \"redhat-marketplace-tf7vk\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") " pod="openshift-marketplace/redhat-marketplace-tf7vk" Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.639703 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tf7vk"] Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.730542 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-catalog-content\") pod \"redhat-marketplace-tf7vk\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") " pod="openshift-marketplace/redhat-marketplace-tf7vk" Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.730987 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmx8l\" (UniqueName: \"kubernetes.io/projected/83137fa1-0d2e-4f9c-8831-c546f37947aa-kube-api-access-kmx8l\") pod \"redhat-marketplace-tf7vk\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") " pod="openshift-marketplace/redhat-marketplace-tf7vk" Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.731011 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-catalog-content\") pod \"redhat-marketplace-tf7vk\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") " pod="openshift-marketplace/redhat-marketplace-tf7vk" Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.731253 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-utilities\") pod \"redhat-marketplace-tf7vk\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") " pod="openshift-marketplace/redhat-marketplace-tf7vk" Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.731704 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-utilities\") pod \"redhat-marketplace-tf7vk\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") " pod="openshift-marketplace/redhat-marketplace-tf7vk" Nov 24 18:13:30 crc kubenswrapper[4760]: I1124 18:13:30.754007 4760 operation_generator.go:637] "MountVolume.SetUp 
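The "Generic (PLEG)" lines come from the pod lifecycle event generator: it periodically relists container state from CRI-O, diffs it against the previous snapshot, and feeds ContainerStarted/ContainerDied events to the sync loop; the exitCode=0 Died events for the extract-utilities and extract-content steps are how the catalog extraction hands off to registry-server. A toy relist diff under those assumptions (real PLEG also tracks sandboxes and exit codes):

```go
// Toy generic-PLEG relist: diff two state snapshots and emit the events
// the kubelet sync loop consumes.
package main

import "fmt"

type state int

const (
	running state = iota
	exited
)

func relist(prev, cur map[string]state) []string {
	var events []string
	for id, s := range cur {
		old, seen := prev[id]
		switch {
		case !seen && s == running:
			events = append(events, "ContainerStarted "+id)
		case seen && old == running && s == exited:
			events = append(events, "ContainerDied "+id)
		}
	}
	return events
}

func main() {
	prev := map[string]state{"aff75bb3": running}
	cur := map[string]state{"aff75bb3": exited, "2bb4f937": running}
	for _, e := range relist(prev, cur) {
		fmt.Println(e) // one Died (exitCode handled elsewhere) and one Started
	}
}
```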
Nov 24 18:13:31 crc kubenswrapper[4760]: I1124 18:13:31.010380 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tf7vk"
Nov 24 18:13:31 crc kubenswrapper[4760]: W1124 18:13:31.491048 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83137fa1_0d2e_4f9c_8831_c546f37947aa.slice/crio-13fdb3caa54eb2e64870af1a653f7a1347730f5adbc72bf1a3f48d0ece7c3d2d WatchSource:0}: Error finding container 13fdb3caa54eb2e64870af1a653f7a1347730f5adbc72bf1a3f48d0ece7c3d2d: Status 404 returned error can't find the container with id 13fdb3caa54eb2e64870af1a653f7a1347730f5adbc72bf1a3f48d0ece7c3d2d
Nov 24 18:13:31 crc kubenswrapper[4760]: I1124 18:13:31.491828 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tf7vk"]
Nov 24 18:13:31 crc kubenswrapper[4760]: I1124 18:13:31.531906 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tf7vk" event={"ID":"83137fa1-0d2e-4f9c-8831-c546f37947aa","Type":"ContainerStarted","Data":"13fdb3caa54eb2e64870af1a653f7a1347730f5adbc72bf1a3f48d0ece7c3d2d"}
Nov 24 18:13:31 crc kubenswrapper[4760]: I1124 18:13:31.534480 4760 generic.go:334] "Generic (PLEG): container finished" podID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerID="415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100" exitCode=0
Nov 24 18:13:31 crc kubenswrapper[4760]: I1124 18:13:31.534627 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24dkf" event={"ID":"66edd3ad-2901-44a6-9a31-69b2da7f41dc","Type":"ContainerDied","Data":"415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100"}
Nov 24 18:13:32 crc kubenswrapper[4760]: I1124 18:13:32.550736 4760 generic.go:334] "Generic (PLEG): container finished" podID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerID="a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82" exitCode=0
Nov 24 18:13:32 crc kubenswrapper[4760]: I1124 18:13:32.550796 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tf7vk" event={"ID":"83137fa1-0d2e-4f9c-8831-c546f37947aa","Type":"ContainerDied","Data":"a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82"}
Nov 24 18:13:34 crc kubenswrapper[4760]: I1124 18:13:34.569635 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24dkf" event={"ID":"66edd3ad-2901-44a6-9a31-69b2da7f41dc","Type":"ContainerStarted","Data":"0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9"}
Nov 24 18:13:34 crc kubenswrapper[4760]: I1124 18:13:34.571559 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tf7vk" event={"ID":"83137fa1-0d2e-4f9c-8831-c546f37947aa","Type":"ContainerDied","Data":"12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b"}
Nov 24 18:13:34 crc kubenswrapper[4760]: I1124 18:13:34.573710 4760 generic.go:334] "Generic (PLEG): container finished" podID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerID="12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b" exitCode=0
Nov 24 18:13:34 crc kubenswrapper[4760]: I1124 18:13:34.599439 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-24dkf" podStartSLOduration=2.874546595 podStartE2EDuration="6.599415012s" podCreationTimestamp="2025-11-24 18:13:28 +0000 UTC" firstStartedPulling="2025-11-24 18:13:29.513614572 +0000 UTC m=+4204.836496162" lastFinishedPulling="2025-11-24 18:13:33.238483029 +0000 UTC m=+4208.561364579" observedRunningTime="2025-11-24 18:13:34.588769499 +0000 UTC m=+4209.911651059" watchObservedRunningTime="2025-11-24 18:13:34.599415012 +0000 UTC m=+4209.922296572"
Nov 24 18:13:36 crc kubenswrapper[4760]: I1124 18:13:36.623288 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tf7vk" event={"ID":"83137fa1-0d2e-4f9c-8831-c546f37947aa","Type":"ContainerStarted","Data":"b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9"}
Nov 24 18:13:36 crc kubenswrapper[4760]: I1124 18:13:36.659323 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tf7vk" podStartSLOduration=4.251294499 podStartE2EDuration="6.659288276s" podCreationTimestamp="2025-11-24 18:13:30 +0000 UTC" firstStartedPulling="2025-11-24 18:13:32.553886374 +0000 UTC m=+4207.876767934" lastFinishedPulling="2025-11-24 18:13:34.961880161 +0000 UTC m=+4210.284761711" observedRunningTime="2025-11-24 18:13:36.654729616 +0000 UTC m=+4211.977611176" watchObservedRunningTime="2025-11-24 18:13:36.659288276 +0000 UTC m=+4211.982169866"
Nov 24 18:13:38 crc kubenswrapper[4760]: I1124 18:13:38.581164 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-24dkf"
Nov 24 18:13:38 crc kubenswrapper[4760]: I1124 18:13:38.581802 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-24dkf"
Nov 24 18:13:38 crc kubenswrapper[4760]: I1124 18:13:38.673623 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-24dkf"
Nov 24 18:13:38 crc kubenswrapper[4760]: I1124 18:13:38.752727 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-24dkf"
Nov 24 18:13:39 crc kubenswrapper[4760]: I1124 18:13:39.011873 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-24dkf"]
Nov 24 18:13:40 crc kubenswrapper[4760]: I1124 18:13:40.669583 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-24dkf" podUID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerName="registry-server" containerID="cri-o://0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9" gracePeriod=2
Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.011444 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tf7vk"
Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.011686 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tf7vk"
Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.064440 4760 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tf7vk"
Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.147989 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-24dkf"
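The pod_startup_latency_tracker figure for certified-operators-24dkf can be reproduced from the values it prints: podStartSLOduration excludes image-pull time from the end-to-end duration, i.e. SLO = E2E - (lastFinishedPulling - firstStartedPulling). Checking the arithmetic with the monotonic m=+ offsets copied from the log line:

```go
// Recomputing podStartSLOduration from the log's own numbers.
package main

import "fmt"

func main() {
	const (
		podStartE2E         = 6.599415012   // watchObservedRunningTime - podCreationTimestamp, seconds
		firstStartedPulling = 4204.836496162 // monotonic m=+ offset
		lastFinishedPulling = 4208.561364579 // monotonic m=+ offset
	)
	pull := lastFinishedPulling - firstStartedPulling
	slo := podStartE2E - pull
	fmt.Printf("image pull took %.9fs\n", pull)    // ~3.724868417s
	fmt.Printf("podStartSLOduration=%.9fs\n", slo) // ~2.874546595s, matching the log
}
```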
"No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.343313 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78z4k\" (UniqueName: \"kubernetes.io/projected/66edd3ad-2901-44a6-9a31-69b2da7f41dc-kube-api-access-78z4k\") pod \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.343528 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-utilities\") pod \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.343560 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-catalog-content\") pod \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\" (UID: \"66edd3ad-2901-44a6-9a31-69b2da7f41dc\") " Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.346111 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-utilities" (OuterVolumeSpecName: "utilities") pod "66edd3ad-2901-44a6-9a31-69b2da7f41dc" (UID: "66edd3ad-2901-44a6-9a31-69b2da7f41dc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.349174 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66edd3ad-2901-44a6-9a31-69b2da7f41dc-kube-api-access-78z4k" (OuterVolumeSpecName: "kube-api-access-78z4k") pod "66edd3ad-2901-44a6-9a31-69b2da7f41dc" (UID: "66edd3ad-2901-44a6-9a31-69b2da7f41dc"). InnerVolumeSpecName "kube-api-access-78z4k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.393227 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66edd3ad-2901-44a6-9a31-69b2da7f41dc" (UID: "66edd3ad-2901-44a6-9a31-69b2da7f41dc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.446300 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78z4k\" (UniqueName: \"kubernetes.io/projected/66edd3ad-2901-44a6-9a31-69b2da7f41dc-kube-api-access-78z4k\") on node \"crc\" DevicePath \"\"" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.446352 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.446371 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66edd3ad-2901-44a6-9a31-69b2da7f41dc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.684882 4760 generic.go:334] "Generic (PLEG): container finished" podID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerID="0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9" exitCode=0 Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.684931 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-24dkf" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.684957 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24dkf" event={"ID":"66edd3ad-2901-44a6-9a31-69b2da7f41dc","Type":"ContainerDied","Data":"0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9"} Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.685091 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24dkf" event={"ID":"66edd3ad-2901-44a6-9a31-69b2da7f41dc","Type":"ContainerDied","Data":"2bb4f937147ce4ead6d141b70c07d41449e78a06f54ad95e3698fbc9d31dbc75"} Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.685115 4760 scope.go:117] "RemoveContainer" containerID="0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.712687 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-24dkf"] Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.715437 4760 scope.go:117] "RemoveContainer" containerID="415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.720540 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-24dkf"] Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.737803 4760 scope.go:117] "RemoveContainer" containerID="aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.750601 4760 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tf7vk" Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.790545 4760 scope.go:117] "RemoveContainer" containerID="0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9" Nov 24 18:13:41 crc kubenswrapper[4760]: E1124 18:13:41.791369 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9\": container with ID starting with 
Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.791427 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9"} err="failed to get container status \"0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9\": rpc error: code = NotFound desc = could not find container \"0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9\": container with ID starting with 0c442a81f0ecd867ae7d2c9491518c79ec6e86eb1e63eca12bf1805ceef636b9 not found: ID does not exist"
Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.791459 4760 scope.go:117] "RemoveContainer" containerID="415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100"
Nov 24 18:13:41 crc kubenswrapper[4760]: E1124 18:13:41.793619 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100\": container with ID starting with 415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100 not found: ID does not exist" containerID="415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100"
Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.793654 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100"} err="failed to get container status \"415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100\": rpc error: code = NotFound desc = could not find container \"415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100\": container with ID starting with 415470d17d771342b99e34c887ce355a319331f3fb1921e6993eeedeb56a8100 not found: ID does not exist"
Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.793677 4760 scope.go:117] "RemoveContainer" containerID="aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674"
Nov 24 18:13:41 crc kubenswrapper[4760]: E1124 18:13:41.794379 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674\": container with ID starting with aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674 not found: ID does not exist" containerID="aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674"
Nov 24 18:13:41 crc kubenswrapper[4760]: I1124 18:13:41.794412 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674"} err="failed to get container status \"aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674\": rpc error: code = NotFound desc = could not find container \"aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674\": container with ID starting with aff75bb398e409cac7486d934fecf87e9ab1bafd47a7d7cd70470de980c21674 not found: ID does not exist"
Nov 24 18:13:43 crc kubenswrapper[4760]: I1124 18:13:43.479281 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" path="/var/lib/kubelet/pods/66edd3ad-2901-44a6-9a31-69b2da7f41dc/volumes"
Nov 24 18:13:44 crc kubenswrapper[4760]: I1124 18:13:44.019641 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tf7vk"]
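The ContainerStatus NotFound errors followed by "DeleteContainer returned error" are a benign race: CRI-O had already removed the container by the time the kubelet's RemoveContainer cleanup asked after it, so the deletor just logs the miss and carries on. A toy in-memory runtime showing that idempotent, NotFound-tolerant handling (an illustration, not the real CRI client):

```go
// Idempotent container deletion: treat "already gone" as the desired end
// state rather than a hard failure.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("NotFound")

type fakeRuntime struct{ containers map[string]bool }

func (r *fakeRuntime) containerStatus(id string) error {
	if !r.containers[id] {
		return fmt.Errorf("could not find container %q: %w", id, errNotFound)
	}
	return nil
}

func (r *fakeRuntime) removeContainer(id string) {
	if err := r.containerStatus(id); err != nil {
		if errors.Is(err, errNotFound) {
			// Same shape as the "DeleteContainer returned error" info line:
			// the container is already gone, which is what we wanted anyway.
			fmt.Printf("DeleteContainer: %v (already removed)\n", err)
			return
		}
	}
	delete(r.containers, id)
}

func main() {
	r := &fakeRuntime{containers: map[string]bool{}} // runtime already pruned it
	r.removeContainer("0c442a81f0ec")
}
```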
Nov 24 18:13:44 crc kubenswrapper[4760]: I1124 18:13:44.721951 4760 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tf7vk" podUID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerName="registry-server" containerID="cri-o://b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9" gracePeriod=2
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.665704 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tf7vk"
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.732836 4760 generic.go:334] "Generic (PLEG): container finished" podID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerID="b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9" exitCode=0
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.732874 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tf7vk" event={"ID":"83137fa1-0d2e-4f9c-8831-c546f37947aa","Type":"ContainerDied","Data":"b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9"}
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.732900 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tf7vk" event={"ID":"83137fa1-0d2e-4f9c-8831-c546f37947aa","Type":"ContainerDied","Data":"13fdb3caa54eb2e64870af1a653f7a1347730f5adbc72bf1a3f48d0ece7c3d2d"}
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.732920 4760 scope.go:117] "RemoveContainer" containerID="b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9"
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.732932 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tf7vk"
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.747728 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-utilities\") pod \"83137fa1-0d2e-4f9c-8831-c546f37947aa\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") "
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.747812 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmx8l\" (UniqueName: \"kubernetes.io/projected/83137fa1-0d2e-4f9c-8831-c546f37947aa-kube-api-access-kmx8l\") pod \"83137fa1-0d2e-4f9c-8831-c546f37947aa\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") "
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.747951 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-catalog-content\") pod \"83137fa1-0d2e-4f9c-8831-c546f37947aa\" (UID: \"83137fa1-0d2e-4f9c-8831-c546f37947aa\") "
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.748774 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-utilities" (OuterVolumeSpecName: "utilities") pod "83137fa1-0d2e-4f9c-8831-c546f37947aa" (UID: "83137fa1-0d2e-4f9c-8831-c546f37947aa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.759574 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83137fa1-0d2e-4f9c-8831-c546f37947aa-kube-api-access-kmx8l" (OuterVolumeSpecName: "kube-api-access-kmx8l") pod "83137fa1-0d2e-4f9c-8831-c546f37947aa" (UID: "83137fa1-0d2e-4f9c-8831-c546f37947aa"). InnerVolumeSpecName "kube-api-access-kmx8l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.762231 4760 scope.go:117] "RemoveContainer" containerID="12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b"
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.773284 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "83137fa1-0d2e-4f9c-8831-c546f37947aa" (UID: "83137fa1-0d2e-4f9c-8831-c546f37947aa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.820074 4760 scope.go:117] "RemoveContainer" containerID="a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82"
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.850027 4760 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.850062 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmx8l\" (UniqueName: \"kubernetes.io/projected/83137fa1-0d2e-4f9c-8831-c546f37947aa-kube-api-access-kmx8l\") on node \"crc\" DevicePath \"\""
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.850073 4760 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83137fa1-0d2e-4f9c-8831-c546f37947aa-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.856381 4760 scope.go:117] "RemoveContainer" containerID="b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9"
Nov 24 18:13:45 crc kubenswrapper[4760]: E1124 18:13:45.856868 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9\": container with ID starting with b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9 not found: ID does not exist" containerID="b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9"
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.856903 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9"} err="failed to get container status \"b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9\": rpc error: code = NotFound desc = could not find container \"b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9\": container with ID starting with b9b56d3e52cfde9d3cb7a244fb47ef9db23df94fde0ac708e650985f68f27ad9 not found: ID does not exist"
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.856926 4760 scope.go:117] "RemoveContainer" containerID="12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b"
Nov 24 18:13:45 crc kubenswrapper[4760]: E1124 18:13:45.857394 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b\": container with ID starting with 12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b not found: ID does not exist" containerID="12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b"
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.857444 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b"} err="failed to get container status \"12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b\": rpc error: code = NotFound desc = could not find container \"12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b\": container with ID starting with 12d5182df1b4382d66f9030f62229ac19d3dd630970ea7fcbd453089f4140b2b not found: ID does not exist"
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.857479 4760 scope.go:117] "RemoveContainer" containerID="a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82"
Nov 24 18:13:45 crc kubenswrapper[4760]: E1124 18:13:45.857762 4760 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82\": container with ID starting with a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82 not found: ID does not exist" containerID="a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82"
Nov 24 18:13:45 crc kubenswrapper[4760]: I1124 18:13:45.857786 4760 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82"} err="failed to get container status \"a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82\": rpc error: code = NotFound desc = could not find container \"a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82\": container with ID starting with a98860dca3a9a91a557168c0605af740d4525fdeb61f33e390ad562ce776bd82 not found: ID does not exist"
Nov 24 18:13:46 crc kubenswrapper[4760]: I1124 18:13:46.078908 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tf7vk"]
Nov 24 18:13:46 crc kubenswrapper[4760]: I1124 18:13:46.089773 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tf7vk"]
Nov 24 18:13:47 crc kubenswrapper[4760]: I1124 18:13:47.485421 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83137fa1-0d2e-4f9c-8831-c546f37947aa" path="/var/lib/kubelet/pods/83137fa1-0d2e-4f9c-8831-c546f37947aa/volumes"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.163332 4760 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"]
Nov 24 18:15:00 crc kubenswrapper[4760]: E1124 18:15:00.164402 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerName="extract-utilities"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.164420 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerName="extract-utilities"
Nov 24 18:15:00 crc kubenswrapper[4760]: E1124 18:15:00.164440 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerName="registry-server"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.164448 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerName="registry-server"
Nov 24 18:15:00 crc kubenswrapper[4760]: E1124 18:15:00.164471 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerName="extract-content"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.164480 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerName="extract-content"
Nov 24 18:15:00 crc kubenswrapper[4760]: E1124 18:15:00.164493 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerName="registry-server"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.164501 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerName="registry-server"
Nov 24 18:15:00 crc kubenswrapper[4760]: E1124 18:15:00.164512 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerName="extract-utilities"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.164519 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerName="extract-utilities"
Nov 24 18:15:00 crc kubenswrapper[4760]: E1124 18:15:00.164541 4760 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerName="extract-content"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.164548 4760 state_mem.go:107] "Deleted CPUSet assignment" podUID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerName="extract-content"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.164724 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="83137fa1-0d2e-4f9c-8831-c546f37947aa" containerName="registry-server"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.164737 4760 memory_manager.go:354] "RemoveStaleState removing state" podUID="66edd3ad-2901-44a6-9a31-69b2da7f41dc" containerName="registry-server"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.165416 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
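The Job behind collect-profiles-29400135-7zt8w carries its schedule in its name: the CronJob controller suffixes each Job with the scheduled time expressed in minutes since the Unix epoch, which is why the SyncLoop ADD arrives at exactly 18:15:00. Decoding the suffix:

```go
// Decode a CronJob Job-name suffix (minutes since the Unix epoch) back
// into the scheduled wall-clock time.
package main

import (
	"fmt"
	"time"
)

func main() {
	const scheduledMinutes = 29400135 // from the pod/Job name above
	t := time.Unix(scheduledMinutes*60, 0).UTC()
	fmt.Println(t) // 2025-11-24 18:15:00 +0000 UTC
}
```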
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.167963 4760 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.168729 4760 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.177403 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"]
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.288316 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c2dv\" (UniqueName: \"kubernetes.io/projected/4c568b35-f2d6-4d08-9f67-527b49d96433-kube-api-access-5c2dv\") pod \"collect-profiles-29400135-7zt8w\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.288478 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4c568b35-f2d6-4d08-9f67-527b49d96433-config-volume\") pod \"collect-profiles-29400135-7zt8w\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.288512 4760 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4c568b35-f2d6-4d08-9f67-527b49d96433-secret-volume\") pod \"collect-profiles-29400135-7zt8w\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.389826 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4c568b35-f2d6-4d08-9f67-527b49d96433-config-volume\") pod \"collect-profiles-29400135-7zt8w\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.389879 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4c568b35-f2d6-4d08-9f67-527b49d96433-secret-volume\") pod \"collect-profiles-29400135-7zt8w\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.390028 4760 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c2dv\" (UniqueName: \"kubernetes.io/projected/4c568b35-f2d6-4d08-9f67-527b49d96433-kube-api-access-5c2dv\") pod \"collect-profiles-29400135-7zt8w\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.391467 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4c568b35-f2d6-4d08-9f67-527b49d96433-config-volume\") pod \"collect-profiles-29400135-7zt8w\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.400693 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4c568b35-f2d6-4d08-9f67-527b49d96433-secret-volume\") pod \"collect-profiles-29400135-7zt8w\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.410445 4760 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c2dv\" (UniqueName: \"kubernetes.io/projected/4c568b35-f2d6-4d08-9f67-527b49d96433-kube-api-access-5c2dv\") pod \"collect-profiles-29400135-7zt8w\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:00 crc kubenswrapper[4760]: I1124 18:15:00.496158 4760 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:01 crc kubenswrapper[4760]: I1124 18:15:00.965281 4760 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"]
Nov 24 18:15:01 crc kubenswrapper[4760]: W1124 18:15:00.969972 4760 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c568b35_f2d6_4d08_9f67_527b49d96433.slice/crio-e00dda0e1ce294c3aab3f56c725319736169d5f05fa78ec003664136c86bd424 WatchSource:0}: Error finding container e00dda0e1ce294c3aab3f56c725319736169d5f05fa78ec003664136c86bd424: Status 404 returned error can't find the container with id e00dda0e1ce294c3aab3f56c725319736169d5f05fa78ec003664136c86bd424
Nov 24 18:15:01 crc kubenswrapper[4760]: I1124 18:15:01.566329 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w" event={"ID":"4c568b35-f2d6-4d08-9f67-527b49d96433","Type":"ContainerStarted","Data":"1943fded99a278285ae9f65dc99dd014bdde9d466434ace14a8aaa85469f8cda"}
Nov 24 18:15:01 crc kubenswrapper[4760]: I1124 18:15:01.566653 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w" event={"ID":"4c568b35-f2d6-4d08-9f67-527b49d96433","Type":"ContainerStarted","Data":"e00dda0e1ce294c3aab3f56c725319736169d5f05fa78ec003664136c86bd424"}
Nov 24 18:15:01 crc kubenswrapper[4760]: I1124 18:15:01.584077 4760 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w" podStartSLOduration=1.584061026 podStartE2EDuration="1.584061026s" podCreationTimestamp="2025-11-24 18:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 18:15:01.578830388 +0000 UTC m=+4296.901711968" watchObservedRunningTime="2025-11-24 18:15:01.584061026 +0000 UTC m=+4296.906942576"
Nov 24 18:15:02 crc kubenswrapper[4760]: I1124 18:15:02.579367 4760 generic.go:334] "Generic (PLEG): container finished" podID="4c568b35-f2d6-4d08-9f67-527b49d96433" containerID="1943fded99a278285ae9f65dc99dd014bdde9d466434ace14a8aaa85469f8cda" exitCode=0
Nov 24 18:15:02 crc kubenswrapper[4760]: I1124 18:15:02.579438 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w" event={"ID":"4c568b35-f2d6-4d08-9f67-527b49d96433","Type":"ContainerDied","Data":"1943fded99a278285ae9f65dc99dd014bdde9d466434ace14a8aaa85469f8cda"}
Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.606459 4760 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w" event={"ID":"4c568b35-f2d6-4d08-9f67-527b49d96433","Type":"ContainerDied","Data":"e00dda0e1ce294c3aab3f56c725319736169d5f05fa78ec003664136c86bd424"}
Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.607113 4760 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e00dda0e1ce294c3aab3f56c725319736169d5f05fa78ec003664136c86bd424"
Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.662650 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w"
Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.785923 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5c2dv\" (UniqueName: \"kubernetes.io/projected/4c568b35-f2d6-4d08-9f67-527b49d96433-kube-api-access-5c2dv\") pod \"4c568b35-f2d6-4d08-9f67-527b49d96433\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") "
Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.786979 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4c568b35-f2d6-4d08-9f67-527b49d96433-config-volume\") pod \"4c568b35-f2d6-4d08-9f67-527b49d96433\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") "
Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.787069 4760 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4c568b35-f2d6-4d08-9f67-527b49d96433-secret-volume\") pod \"4c568b35-f2d6-4d08-9f67-527b49d96433\" (UID: \"4c568b35-f2d6-4d08-9f67-527b49d96433\") "
Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.787794 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c568b35-f2d6-4d08-9f67-527b49d96433-config-volume" (OuterVolumeSpecName: "config-volume") pod "4c568b35-f2d6-4d08-9f67-527b49d96433" (UID: "4c568b35-f2d6-4d08-9f67-527b49d96433"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.798694 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c568b35-f2d6-4d08-9f67-527b49d96433-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4c568b35-f2d6-4d08-9f67-527b49d96433" (UID: "4c568b35-f2d6-4d08-9f67-527b49d96433"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.798716 4760 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c568b35-f2d6-4d08-9f67-527b49d96433-kube-api-access-5c2dv" (OuterVolumeSpecName: "kube-api-access-5c2dv") pod "4c568b35-f2d6-4d08-9f67-527b49d96433" (UID: "4c568b35-f2d6-4d08-9f67-527b49d96433"). InnerVolumeSpecName "kube-api-access-5c2dv". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.889715 4760 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5c2dv\" (UniqueName: \"kubernetes.io/projected/4c568b35-f2d6-4d08-9f67-527b49d96433-kube-api-access-5c2dv\") on node \"crc\" DevicePath \"\"" Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.889752 4760 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4c568b35-f2d6-4d08-9f67-527b49d96433-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 18:15:04 crc kubenswrapper[4760]: I1124 18:15:04.889764 4760 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4c568b35-f2d6-4d08-9f67-527b49d96433-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 18:15:05 crc kubenswrapper[4760]: I1124 18:15:05.618536 4760 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29400135-7zt8w" Nov 24 18:15:05 crc kubenswrapper[4760]: I1124 18:15:05.643433 4760 patch_prober.go:28] interesting pod/machine-config-daemon-vgbxz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 18:15:05 crc kubenswrapper[4760]: I1124 18:15:05.643545 4760 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vgbxz" podUID="f71fb2ac-0373-4606-a20a-0b60ca26fbc3" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 18:15:05 crc kubenswrapper[4760]: I1124 18:15:05.744591 4760 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"] Nov 24 18:15:05 crc kubenswrapper[4760]: I1124 18:15:05.755134 4760 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29400090-7d4mt"] Nov 24 18:15:07 crc kubenswrapper[4760]: I1124 18:15:07.480167 4760 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1daffafe-fc2d-4f64-af17-6c6cbbe5bd81" path="/var/lib/kubelet/pods/1daffafe-fc2d-4f64-af17-6c6cbbe5bd81/volumes" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111120310024426 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111120310017343 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015111107407016502 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015111107410015444 5ustar corecore